hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
3f6889527f2554933facf4a7e407dd85a8fc1e14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@gmail.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "WeightFactory.h" #include "Global.h" #include "cublasWrapper.h" #include "utils.h" namespace cytonLib { WeightFactory weightFactory; void WeightFactory::init(const string& method) { if(method=="adam") { optAdam=true; adamGamma=0.9; adamGamma2=0.999999; adamEpsilon=1e-9; } else if(method=="SGD") { optSgd=true; } else { assert(false); } } void WeightFactory::create(Weight& weight, string tag, int ni, int nj) { weight.create(tag, ni, nj); weights.push_back(&weight); } void WeightFactory::alloc(Precision clipGradient) { int length=0; for(int i=0;i<weights.size();i++) { Weight& w=*weights.at(i); XLLib::printfln(global.os, "weight%d %s %d*%d", i, w.tag.c_str(), w.ni, w.nj); length+=w.length(); } whole.resize(length, 1); whole.clipGrad=clipGradient; XLLib::printfln(global.os, "totalWeight %d",length); int offset=0; for(vector<Weight*>::iterator iw=weights.begin();iw!=weights.end();iw++) { Weight& w=*(*iw); w.set(w.ni, w.ni, w.nj, whole.data+offset, whole.grad.data+offset); offset+=w.length(); } whole.initRandom(-global.initFactor, global.initFactor); if(optAdam) { momentum.resize(whole.ni, whole.nj); momentum.setZero(); gradientVariance.resize(whole.ni, whole.nj); gradientVariance.setZero(); dWeight.resize(whole.ni, whole.nj); } else if(optSgd) { } else { assert(false); } } void 
WeightFactory::clearGrad() { whole.grad.setZero(); } __global__ void weightFactory_update_adam(Precision* grad, Precision* gradMomentum, Precision* gradVar, Precision* weight, Precision* dWeight, int len, Precision gamma,Precision gamma2, Precision epsilon, Precision lambda ) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { Precision& g=grad[i]; Precision& gm=gradMomentum[i]; Precision& gv=gradVar[i]; Precision& w=weight[i]; Precision& dw=dWeight[i]; gm=(1-gamma)*g+gamma*gm; gv=(1-gamma2)*g*g+gamma2*gv; dw= gm/(sqrt(gv)+epsilon)*lambda; w += dw; } } void WeightFactory::update(Precision lambda) { int len=whole.length(); Precision pnFactor=sqrt(1.0/whole.length()); if(whole.clipGrad>0) { whole.grad.clip(whole.clipGrad); } if(optAdam) { Precision step=global.batch; Precision tf=sqrt(1.0-::pow(adamGamma2, step)) / (1.0-::pow(adamGamma, step)); hipLaunchKernelGGL(( weightFactory_update_adam), dim3(ceil(len, blockSize)), dim3(blockSize), 0, 0, whole.grad.data, momentum.data, gradientVariance.data, whole.data, dWeight.data, len, adamGamma, adamGamma2, adamEpsilon, lambda*tf); } else if(optSgd) { checkError(cublasXaxpy(global.cublasHandle, whole.length(), &lambda, whole.grad.data, 1, whole.data, 1)); } else { assert(false); } } void WeightFactory::save(const string& fileName) { XLLib::dirPrepare4file(fileName); std::ofstream f(fileName.c_str()); f<<"##"<<"WeightFactory"<<"\n"; whole.save(f); f.close(); } void WeightFactory::load(const string& fileName) { if(!XLLib::fileExists(fileName)) { XLLib::printfln("Error: model file %s does not exist.", fileName.c_str()); assert(false); } ifstream f(fileName.c_str()); string tTag=string("##WeightFactory"); checkFile(f,tTag); whole.load(f); f.close(); } } /* namespace cytonLib */
3f6889527f2554933facf4a7e407dd85a8fc1e14.cu
/* Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@gmail.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "WeightFactory.h" #include "Global.h" #include "cublasWrapper.h" #include "utils.h" namespace cytonLib { WeightFactory weightFactory; void WeightFactory::init(const string& method) { if(method=="adam") { optAdam=true; adamGamma=0.9; adamGamma2=0.999999; adamEpsilon=1e-9; } else if(method=="SGD") { optSgd=true; } else { assert(false); } } void WeightFactory::create(Weight& weight, string tag, int ni, int nj) { weight.create(tag, ni, nj); weights.push_back(&weight); } void WeightFactory::alloc(Precision clipGradient) { int length=0; for(int i=0;i<weights.size();i++) { Weight& w=*weights.at(i); XLLib::printfln(global.os, "weight%d %s %d*%d", i, w.tag.c_str(), w.ni, w.nj); length+=w.length(); } whole.resize(length, 1); whole.clipGrad=clipGradient; XLLib::printfln(global.os, "totalWeight %d",length); int offset=0; for(vector<Weight*>::iterator iw=weights.begin();iw!=weights.end();iw++) { Weight& w=*(*iw); w.set(w.ni, w.ni, w.nj, whole.data+offset, whole.grad.data+offset); offset+=w.length(); } whole.initRandom(-global.initFactor, global.initFactor); if(optAdam) { momentum.resize(whole.ni, whole.nj); momentum.setZero(); gradientVariance.resize(whole.ni, whole.nj); gradientVariance.setZero(); dWeight.resize(whole.ni, whole.nj); } else if(optSgd) { } else { assert(false); } } void WeightFactory::clearGrad() { whole.grad.setZero(); } __global__ void 
weightFactory_update_adam(Precision* grad, Precision* gradMomentum, Precision* gradVar, Precision* weight, Precision* dWeight, int len, Precision gamma,Precision gamma2, Precision epsilon, Precision lambda ) { int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<len) { Precision& g=grad[i]; Precision& gm=gradMomentum[i]; Precision& gv=gradVar[i]; Precision& w=weight[i]; Precision& dw=dWeight[i]; gm=(1-gamma)*g+gamma*gm; gv=(1-gamma2)*g*g+gamma2*gv; dw= gm/(sqrt(gv)+epsilon)*lambda; w += dw; } } void WeightFactory::update(Precision lambda) { int len=whole.length(); Precision pnFactor=sqrt(1.0/whole.length()); if(whole.clipGrad>0) { whole.grad.clip(whole.clipGrad); } if(optAdam) { Precision step=global.batch; Precision tf=sqrt(1.0-std::pow(adamGamma2, step)) / (1.0-std::pow(adamGamma, step)); weightFactory_update_adam<<<ceil(len, blockSize), blockSize>>>(whole.grad.data, momentum.data, gradientVariance.data, whole.data, dWeight.data, len, adamGamma, adamGamma2, adamEpsilon, lambda*tf); } else if(optSgd) { checkError(cublasXaxpy(global.cublasHandle, whole.length(), &lambda, whole.grad.data, 1, whole.data, 1)); } else { assert(false); } } void WeightFactory::save(const string& fileName) { XLLib::dirPrepare4file(fileName); std::ofstream f(fileName.c_str()); f<<"##"<<"WeightFactory"<<"\n"; whole.save(f); f.close(); } void WeightFactory::load(const string& fileName) { if(!XLLib::fileExists(fileName)) { XLLib::printfln("Error: model file %s does not exist.", fileName.c_str()); assert(false); } ifstream f(fileName.c_str()); string tTag=string("##WeightFactory"); checkFile(f,tTag); whole.load(f); f.close(); } } /* namespace cytonLib */
50ca0719f68f7255e5c7c298f83eb04707c126c9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <hip/hip_runtime.h> // added #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; //#define BENCH_PRINT void init(int argc, char** argv) { int devID = 0; if(argc==5){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); devID=atoi(argv[4]); }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } printf("select device : %d\n", devID); hipSetDevice(devID); hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess){ printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); }else{ printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } data = new int[rows*cols]; wall = new int*[rows]; for(int n=0; n<rows; n++) wall[n]=data+cols*n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT /* for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%d ",wall[i][j]) ; } printf("\n") ; } */ #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? 
(a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { // cke int num_streams = 2; hipStream_t *streams = (hipStream_t *) malloc(num_streams * sizeof(hipStream_t)); for (int i = 0; i < num_streams; i++) hipStreamCreate(&(streams[i])); dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; for(int sid=0; sid < num_streams; sid++) { hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, streams[sid], MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } } for (int i = 0; i < num_streams; i++) hipStreamDestroy(streams[i]); return dst; } int main(int argc, char** argv) { int num_devices; hipGetDeviceCount(&num_devices); if (num_devices > 1) hipSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; hipMalloc((void**)&gpuResult[0], sizeof(int)*cols); hipMalloc((void**)&gpuResult[1], sizeof(int)*cols); hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice); hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), 
hipMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost); #ifdef BENCH_PRINT /* for (int i = 0; i < cols; i++) printf("%d ",data[i]) ; printf("\n") ; for (int i = 0; i < cols; i++) printf("%d ",result[i]) ; printf("\n") ; */ #endif hipFree(gpuWall); hipFree(gpuResult[0]); hipFree(gpuResult[1]); delete [] data; delete [] wall; delete [] result; }
50ca0719f68f7255e5c7c298f83eb04707c126c9.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <cuda.h> // added #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; //#define BENCH_PRINT void init(int argc, char** argv) { int devID = 0; if(argc==5){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); devID=atoi(argv[4]); }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } printf("select device : %d\n", devID); cudaSetDevice(devID); cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess){ printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); }else{ printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } data = new int[rows*cols]; wall = new int*[rows]; for(int n=0; n<rows; n++) wall[n]=data+cols*n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT /* for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%d ",wall[i][j]) ; } printf("\n") ; } */ #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? 
(a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { // cke int num_streams = 2; cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); for (int i = 0; i < num_streams; i++) cudaStreamCreate(&(streams[i])); dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; for(int sid=0; sid < num_streams; sid++) { dynproc_kernel<<<dimGrid, dimBlock, 0, streams[sid]>>>( MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } } for (int i = 0; i < num_streams; i++) cudaStreamDestroy(streams[i]); return dst; } int main(int argc, char** argv) { int num_devices; cudaGetDeviceCount(&num_devices); if (num_devices > 1) cudaSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols); cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols); cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice); cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice); int final_ret 
= calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost); #ifdef BENCH_PRINT /* for (int i = 0; i < cols; i++) printf("%d ",data[i]) ; printf("\n") ; for (int i = 0; i < cols; i++) printf("%d ",result[i]) ; printf("\n") ; */ #endif cudaFree(gpuWall); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); delete [] data; delete [] wall; delete [] result; }
6f0a0d3d21f23369148057186d326446f4bdccc4.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <Array.hpp> #include <copy.hpp> #include <debug_cuda.hpp> #include <thrust_utils.hpp> #include <set.hpp> #include <sort.hpp> #include <af/dim4.hpp> #include <thrust/device_ptr.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <algorithm> namespace cuda { using af::dim4; template<typename T> Array<T> setUnique(const Array<T> &in, const bool is_sorted) { Array<T> out = copyArray<T>(in); thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_end = out_ptr + out.elements(); if (!is_sorted) THRUST_SELECT(thrust::sort, out_ptr, out_ptr_end); thrust::device_ptr<T> out_ptr_last; THRUST_SELECT_OUT(out_ptr_last, thrust::unique, out_ptr, out_ptr_end); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } template<typename T> Array<T> setUnion(const Array<T> &first, const Array<T> &second, const bool is_unique) { Array<T> unique_first = first; Array<T> unique_second = second; if (!is_unique) { unique_first = setUnique(first, false); unique_second = setUnique(second, false); } dim_t out_size = unique_first.elements() + unique_second.elements(); Array<T> out = createEmptyArray<T>(dim4(out_size)); thrust::device_ptr<T> first_ptr = thrust::device_pointer_cast<T>(unique_first.get()); thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements(); thrust::device_ptr<T> second_ptr = thrust::device_pointer_cast<T>(unique_second.get()); thrust::device_ptr<T> second_ptr_end = second_ptr + unique_second.elements(); thrust::device_ptr<T> out_ptr = 
thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_last; THRUST_SELECT_OUT(out_ptr_last, thrust::set_union, first_ptr, first_ptr_end, second_ptr, second_ptr_end, out_ptr); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } template<typename T> Array<T> setIntersect(const Array<T> &first, const Array<T> &second, const bool is_unique) { Array<T> unique_first = first; Array<T> unique_second = second; if (!is_unique) { unique_first = setUnique(first, false); unique_second = setUnique(second, false); } dim_t out_size = ::max(unique_first.elements(), unique_second.elements()); Array<T> out = createEmptyArray<T>(dim4(out_size)); thrust::device_ptr<T> first_ptr = thrust::device_pointer_cast<T>(unique_first.get()); thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements(); thrust::device_ptr<T> second_ptr = thrust::device_pointer_cast<T>(unique_second.get()); thrust::device_ptr<T> second_ptr_end = second_ptr + unique_second.elements(); thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_last; THRUST_SELECT_OUT(out_ptr_last, thrust::set_intersection, first_ptr, first_ptr_end, second_ptr, second_ptr_end, out_ptr); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } #define INSTANTIATE(T) \ template Array<T> setUnique<T>(const Array<T> &in, const bool is_sorted); \ template Array<T> setUnion<T>( \ const Array<T> &first, const Array<T> &second, const bool is_unique); \ template Array<T> setIntersect<T>( \ const Array<T> &first, const Array<T> &second, const bool is_unique); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(char) INSTANTIATE(uchar) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(intl) INSTANTIATE(uintl) } // namespace cuda
6f0a0d3d21f23369148057186d326446f4bdccc4.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <Array.hpp> #include <copy.hpp> #include <debug_cuda.hpp> #include <thrust_utils.hpp> #include <set.hpp> #include <sort.hpp> #include <af/dim4.hpp> #include <thrust/device_ptr.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <algorithm> namespace cuda { using af::dim4; template<typename T> Array<T> setUnique(const Array<T> &in, const bool is_sorted) { Array<T> out = copyArray<T>(in); thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_end = out_ptr + out.elements(); if (!is_sorted) THRUST_SELECT(thrust::sort, out_ptr, out_ptr_end); thrust::device_ptr<T> out_ptr_last; THRUST_SELECT_OUT(out_ptr_last, thrust::unique, out_ptr, out_ptr_end); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } template<typename T> Array<T> setUnion(const Array<T> &first, const Array<T> &second, const bool is_unique) { Array<T> unique_first = first; Array<T> unique_second = second; if (!is_unique) { unique_first = setUnique(first, false); unique_second = setUnique(second, false); } dim_t out_size = unique_first.elements() + unique_second.elements(); Array<T> out = createEmptyArray<T>(dim4(out_size)); thrust::device_ptr<T> first_ptr = thrust::device_pointer_cast<T>(unique_first.get()); thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements(); thrust::device_ptr<T> second_ptr = thrust::device_pointer_cast<T>(unique_second.get()); thrust::device_ptr<T> second_ptr_end = second_ptr + unique_second.elements(); thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_last; 
THRUST_SELECT_OUT(out_ptr_last, thrust::set_union, first_ptr, first_ptr_end, second_ptr, second_ptr_end, out_ptr); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } template<typename T> Array<T> setIntersect(const Array<T> &first, const Array<T> &second, const bool is_unique) { Array<T> unique_first = first; Array<T> unique_second = second; if (!is_unique) { unique_first = setUnique(first, false); unique_second = setUnique(second, false); } dim_t out_size = std::max(unique_first.elements(), unique_second.elements()); Array<T> out = createEmptyArray<T>(dim4(out_size)); thrust::device_ptr<T> first_ptr = thrust::device_pointer_cast<T>(unique_first.get()); thrust::device_ptr<T> first_ptr_end = first_ptr + unique_first.elements(); thrust::device_ptr<T> second_ptr = thrust::device_pointer_cast<T>(unique_second.get()); thrust::device_ptr<T> second_ptr_end = second_ptr + unique_second.elements(); thrust::device_ptr<T> out_ptr = thrust::device_pointer_cast<T>(out.get()); thrust::device_ptr<T> out_ptr_last; THRUST_SELECT_OUT(out_ptr_last, thrust::set_intersection, first_ptr, first_ptr_end, second_ptr, second_ptr_end, out_ptr); out.resetDims(dim4(thrust::distance(out_ptr, out_ptr_last))); return out; } #define INSTANTIATE(T) \ template Array<T> setUnique<T>(const Array<T> &in, const bool is_sorted); \ template Array<T> setUnion<T>( \ const Array<T> &first, const Array<T> &second, const bool is_unique); \ template Array<T> setIntersect<T>( \ const Array<T> &first, const Array<T> &second, const bool is_unique); INSTANTIATE(float) INSTANTIATE(double) INSTANTIATE(int) INSTANTIATE(uint) INSTANTIATE(char) INSTANTIATE(uchar) INSTANTIATE(short) INSTANTIATE(ushort) INSTANTIATE(intl) INSTANTIATE(uintl) } // namespace cuda
367b1c42cf2f6cb7f79b7a4b952b41048967c117.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 Saurabh Yadav // // This software is released under the MIT License. // https://opensource.org/licenses/MIT #include <iostream> #include <math.h> #include <hip/hip_runtime.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
367b1c42cf2f6cb7f79b7a4b952b41048967c117.cu
// Copyright (c) 2020 Saurabh Yadav // // This software is released under the MIT License. // https://opensource.org/licenses/MIT #include <iostream> #include <math.h> #include <cuda_runtime.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks, blockSize>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
5ee32f87fd1dcaedd276a3af10d17b26cdf0d0da.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
 * Copyright (c) 2019 Konduit K.K.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/
//
// @author Oleh Semeniv (oleg.semeniv@gmail.com)
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>

namespace sd {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
// AdaDelta optimizer update kernel. Per element it computes:
//   stMsg  = rho * initMsg  + (1 - rho) * grad^2            (running avg of squared gradients)
//   up     = grad * sqrt(initMsdx + eps) / sqrt(stMsg + eps) (the parameter update)
//   stMsdx = rho * initMsdx + (1 - rho) * up^2               (running avg of squared updates)
// Inputs/outputs are raw buffers with nd4j shape-info descriptors; when all
// six arrays share element-wise stride 1 and ordering, the kernel indexes
// them linearly, otherwise it falls back to coordinate-based offsets.
template<typename T>
__global__ void adaDeltaUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
    const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
    void* vstMsg, const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo,
    const T rho, const T epsilon) {

    // Reinterpret the type-erased buffers at the concrete element type T.
    const auto grad = reinterpret_cast<const T*>(vx);
    const auto initMsg= reinterpret_cast<const T*>(vinMsg);
    const auto initMsdx = reinterpret_cast<const T*>(vinMsdx);

    auto up = reinterpret_cast<T*>(vz);
    auto stMsg = reinterpret_cast<T*>(vstMsg);
    auto stMsdx = reinterpret_cast<T*>(vstMsdx);

    // Shape-derived flags are identical for all threads, so thread 0
    // computes them once and broadcasts via shared memory.
    __shared__ Nd4jLong xLen;
    __shared__ T rhoT;
    __shared__ bool bEWS, bOrdering, bXZsame, bXInMsgSame, bXStMsgSame, bXInMsdxSame, bXStMsdxSame;

    if (threadIdx.x == 0) {
        xLen = shape::length(xShapeInfo);
        rhoT = (1 - rho);   // precomputed (1 - rho) factor for both running averages

        // bEWS: every array is contiguous (element-wise stride 1).
        bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
            1 == shape::elementWiseStride(stMsgShapeInfo) && 1 == shape::elementWiseStride(inMsgShapeInfo) &&
            1 == shape::elementWiseStride(stMsdxShapeInfo) && 1 == shape::elementWiseStride(inMsdxShapeInfo);
        // bOrdering: all arrays share the same memory order ('c'/'f').
        bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stMsgShapeInfo) &&
            shape::order(stMsgShapeInfo) == shape::order(inMsgShapeInfo) && shape::order(inMsgShapeInfo) == shape::order(stMsdxShapeInfo) &&
            shape::order(stMsdxShapeInfo) == shape::order(inMsdxShapeInfo);
        // Per-array "same shape and strides as gradient" flags let the slow
        // path reuse the gradient's offset instead of recomputing one.
        bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
        bXInMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsgShapeInfo);
        bXStMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsgShapeInfo);
        bXInMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsdxShapeInfo);
        bXStMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsdxShapeInfo);
    }
    __syncthreads();   // all threads wait for the broadcast flags

    int coords[MAX_RANK];

    // Grid-stride loop over all elements.
    for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {

        // Fast path: linear index i is the offset for every array.
        auto xOffset = i, zOffset = i, initMsgOffset = i, initMsdxOffset = i, stMsgOffset = i, stMsdxOffset = i;

        if (!bEWS || !bOrdering){
            // Slow path: translate the linear index into coordinates and
            // compute a per-array offset (reusing xOffset where shapes match).
            shape::index2coords(i, xShapeInfo, coords);
            xOffset = shape::getOffset(xShapeInfo, coords);
            zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
            initMsgOffset = bXInMsgSame ? xOffset : shape::getOffset(inMsgShapeInfo, coords);
            stMsgOffset = bXStMsgSame ? xOffset : shape::getOffset(stMsgShapeInfo, coords);
            initMsdxOffset = bXInMsdxSame ? xOffset : shape::getOffset(inMsdxShapeInfo, coords);
            stMsdxOffset = bXStMsdxSame ? xOffset : shape::getOffset(stMsdxShapeInfo, coords);
        }

        // AdaDelta update (see header comment for the three equations).
        stMsg[stMsgOffset] = rho * initMsg[initMsgOffset] + grad[xOffset] * grad[xOffset] * rhoT;

        up[zOffset] = grad[xOffset] * (sd::math::nd4j_sqrt<T, T>(initMsdx[initMsdxOffset] + epsilon) / sd::math::nd4j_sqrt<T, T>(stMsg[stMsgOffset] + epsilon));

        stMsdx[stMsdxOffset] = rho * initMsdx[initMsdxOffset] + up[zOffset] * up[zOffset] * rhoT;
    }
}

///////////////////////////////////////////////////////////////////
// Host-side launcher: casts the double hyper-parameters to T and launches
// the kernel on the given stream.
// NOTE(review): 256 bytes of dynamic shared memory are requested, but the
// kernel declares only static __shared__ variables - presumably this is a
// harmless template-wide default; confirm before changing.
template<typename T>
linkage void adaDeltaUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream,
    const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
    const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
    void* vstMsg, const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo,
    const double dRho, const double dEpsilon) {

    const T rho = static_cast<T>(dRho);
    const T epsilon = static_cast<T>(dEpsilon);

    hipLaunchKernelGGL(( adaDeltaUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, * stream, vx, xShapeInfo, vinMsg, inMsgShapeInfo, vinMsdx, inMsdxShapeInfo, vz, zShapeInfo, vstMsg, stMsgShapeInfo, vstMsdx, stMsdxShapeInfo, rho, epsilon);
}

///////////////////////////////////////////////////////////////////
// Public entry point: runs one AdaDelta step on device NDArrays.
// Dispatches on the gradient's dtype (FLOAT_TYPES) to the typed launcher,
// bracketing the launch with prepare/registerSpecialUse so the NDArray
// special (device) buffers are synchronized, then blocks until completion.
void updaterAdaDelta(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateMsg,
    const NDArray& initStateMsdx, NDArray& update, NDArray& stateMsg, NDArray& stateMsdx, const double dRho, const double dEpsilon) {

    PointersManager manager(context, "adaDeltaUpdater");

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    // Ceil-div so every gradient element is covered by a thread.
    const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;

    NDArray::prepareSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
    BUILD_SINGLE_SELECTOR(gradient.dataType(), adaDeltaUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateMsg.specialBuffer(), initStateMsg.specialShapeInfo(), initStateMsdx.specialBuffer(), initStateMsdx.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(),stateMsg.specialBuffer(), stateMsg.specialShapeInfo(), stateMsdx.specialBuffer(), stateMsdx.specialShapeInfo(), dRho, dEpsilon), FLOAT_TYPES);
    NDArray::registerSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });

    manager.synchronize();
}

}
}
}
5ee32f87fd1dcaedd276a3af10d17b26cdf0d0da.cu
/*******************************************************************************
 * Copyright (c) 2019 Konduit K.K.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/
//
// @author Oleh Semeniv (oleg.semeniv@gmail.com)
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>

namespace sd {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
// AdaDelta optimizer update kernel. Per element it computes:
//   stMsg  = rho * initMsg  + (1 - rho) * grad^2            (running avg of squared gradients)
//   up     = grad * sqrt(initMsdx + eps) / sqrt(stMsg + eps) (the parameter update)
//   stMsdx = rho * initMsdx + (1 - rho) * up^2               (running avg of squared updates)
// Inputs/outputs are raw buffers with nd4j shape-info descriptors; when all
// six arrays share element-wise stride 1 and ordering, the kernel indexes
// them linearly, otherwise it falls back to coordinate-based offsets.
template<typename T>
__global__ void adaDeltaUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
    const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
    void* vstMsg, const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo,
    const T rho, const T epsilon) {

    // Reinterpret the type-erased buffers at the concrete element type T.
    const auto grad = reinterpret_cast<const T*>(vx);
    const auto initMsg= reinterpret_cast<const T*>(vinMsg);
    const auto initMsdx = reinterpret_cast<const T*>(vinMsdx);

    auto up = reinterpret_cast<T*>(vz);
    auto stMsg = reinterpret_cast<T*>(vstMsg);
    auto stMsdx = reinterpret_cast<T*>(vstMsdx);

    // Shape-derived flags are identical for all threads, so thread 0
    // computes them once and broadcasts via shared memory.
    __shared__ Nd4jLong xLen;
    __shared__ T rhoT;
    __shared__ bool bEWS, bOrdering, bXZsame, bXInMsgSame, bXStMsgSame, bXInMsdxSame, bXStMsdxSame;

    if (threadIdx.x == 0) {
        xLen = shape::length(xShapeInfo);
        rhoT = (1 - rho);   // precomputed (1 - rho) factor for both running averages

        // bEWS: every array is contiguous (element-wise stride 1).
        bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
            1 == shape::elementWiseStride(stMsgShapeInfo) && 1 == shape::elementWiseStride(inMsgShapeInfo) &&
            1 == shape::elementWiseStride(stMsdxShapeInfo) && 1 == shape::elementWiseStride(inMsdxShapeInfo);
        // bOrdering: all arrays share the same memory order ('c'/'f').
        bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stMsgShapeInfo) &&
            shape::order(stMsgShapeInfo) == shape::order(inMsgShapeInfo) && shape::order(inMsgShapeInfo) == shape::order(stMsdxShapeInfo) &&
            shape::order(stMsdxShapeInfo) == shape::order(inMsdxShapeInfo);
        // Per-array "same shape and strides as gradient" flags let the slow
        // path reuse the gradient's offset instead of recomputing one.
        bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
        bXInMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsgShapeInfo);
        bXStMsgSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsgShapeInfo);
        bXInMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, inMsdxShapeInfo);
        bXStMsdxSame = shape::haveSameShapeAndStrides(xShapeInfo, stMsdxShapeInfo);
    }
    __syncthreads();   // all threads wait for the broadcast flags

    int coords[MAX_RANK];

    // Grid-stride loop over all elements.
    for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {

        // Fast path: linear index i is the offset for every array.
        auto xOffset = i, zOffset = i, initMsgOffset = i, initMsdxOffset = i, stMsgOffset = i, stMsdxOffset = i;

        if (!bEWS || !bOrdering){
            // Slow path: translate the linear index into coordinates and
            // compute a per-array offset (reusing xOffset where shapes match).
            shape::index2coords(i, xShapeInfo, coords);
            xOffset = shape::getOffset(xShapeInfo, coords);
            zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
            initMsgOffset = bXInMsgSame ? xOffset : shape::getOffset(inMsgShapeInfo, coords);
            stMsgOffset = bXStMsgSame ? xOffset : shape::getOffset(stMsgShapeInfo, coords);
            initMsdxOffset = bXInMsdxSame ? xOffset : shape::getOffset(inMsdxShapeInfo, coords);
            stMsdxOffset = bXStMsdxSame ? xOffset : shape::getOffset(stMsdxShapeInfo, coords);
        }

        // AdaDelta update (see header comment for the three equations).
        stMsg[stMsgOffset] = rho * initMsg[initMsgOffset] + grad[xOffset] * grad[xOffset] * rhoT;

        up[zOffset] = grad[xOffset] * (sd::math::nd4j_sqrt<T, T>(initMsdx[initMsdxOffset] + epsilon) / sd::math::nd4j_sqrt<T, T>(stMsg[stMsgOffset] + epsilon));

        stMsdx[stMsdxOffset] = rho * initMsdx[initMsdxOffset] + up[zOffset] * up[zOffset] * rhoT;
    }
}

///////////////////////////////////////////////////////////////////
// Host-side launcher: casts the double hyper-parameters to T and launches
// the kernel on the given stream.
// NOTE(review): 256 bytes of dynamic shared memory are requested, but the
// kernel declares only static __shared__ variables - presumably this is a
// harmless template-wide default; confirm before changing.
template<typename T>
linkage void adaDeltaUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream,
    const void* vx, const Nd4jLong* xShapeInfo, const void* vinMsg, const Nd4jLong* inMsgShapeInfo,
    const void* vinMsdx, const Nd4jLong* inMsdxShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
    void* vstMsg, const Nd4jLong* stMsgShapeInfo, void* vstMsdx, const Nd4jLong* stMsdxShapeInfo,
    const double dRho, const double dEpsilon) {

    const T rho = static_cast<T>(dRho);
    const T epsilon = static_cast<T>(dEpsilon);

    adaDeltaUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, 256, * stream>>>(vx, xShapeInfo, vinMsg, inMsgShapeInfo, vinMsdx, inMsdxShapeInfo, vz, zShapeInfo, vstMsg, stMsgShapeInfo, vstMsdx, stMsdxShapeInfo, rho, epsilon);
}

///////////////////////////////////////////////////////////////////
// Public entry point: runs one AdaDelta step on device NDArrays.
// Dispatches on the gradient's dtype (FLOAT_TYPES) to the typed launcher,
// bracketing the launch with prepare/registerSpecialUse so the NDArray
// special (device) buffers are synchronized, then blocks until completion.
void updaterAdaDelta(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateMsg,
    const NDArray& initStateMsdx, NDArray& update, NDArray& stateMsg, NDArray& stateMsdx, const double dRho, const double dEpsilon) {

    PointersManager manager(context, "adaDeltaUpdater");

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    // Ceil-div so every gradient element is covered by a thread.
    const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;

    NDArray::prepareSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });
    BUILD_SINGLE_SELECTOR(gradient.dataType(), adaDeltaUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateMsg.specialBuffer(), initStateMsg.specialShapeInfo(), initStateMsdx.specialBuffer(), initStateMsdx.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(),stateMsg.specialBuffer(), stateMsg.specialShapeInfo(), stateMsdx.specialBuffer(), stateMsdx.specialShapeInfo(), dRho, dEpsilon), FLOAT_TYPES);
    NDArray::registerSpecialUse({ &update, &stateMsg, &stateMsdx }, { &gradient, &initStateMsg, &initStateMsdx });

    manager.synchronize();
}

}
}
}
edc5860ac7ff2bb81910b1ef24d68360bbf68ce8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star2d1r-256-10-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = 
__b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + 
(0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 
2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, 
__reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, 
__reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, 
__reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 
3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); 
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); 
__STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, 
__reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, 
__reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; 
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { 
__b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, 
__reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, 
__reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
__reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, 
__reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, 
__reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, 
__reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, 
__reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, 
__reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } }
/*
 * kernel0_8 -- machine-generated (AN5D-style) 2D 5-point stencil kernel that
 * fuses __side0Len = 8 time steps per sweep over the array.  DO NOT hand-edit
 * the pipeline below; it is generated code and any fix belongs in the generator.
 *
 * Contract (visible in this definition):
 *  - A holds two time planes of a dimsize x dimsize grid; __LOAD reads plane
 *    (c0 % 2) and __STORE writes plane ((c0 + 1) % 2) via __DEST.
 *  - __CALCEXPR is the 5-point update: 0.2500f * center, 0.1876f/0.1877f for
 *    the c2-neighbors fetched from the shared row (__SBREF +-1), and
 *    0.1873f/0.1874f for the c1-neighbors streamed through registers.
 *  - Each block covers a 128 (c1) x 240 (c2) tile plus an overlap of
 *    __OlLen = halo * 8 cells per side, so the 8 fused steps can be computed
 *    redundantly at tile borders (__writeValid1..8 shrink the valid region by
 *    one halo per fused step; only __writeValid8 threads store).
 *  - The c1 dimension is streamed: three registers per pipeline stage
 *    (__reg_k_0/1/2) rotate as __h advances; stage k holds data that is k time
 *    steps ahead of the input plane.
 *  - __b_sb_double is a double-buffered shared row (2 * __blockSize floats);
 *    __DB_SWITCH flips halves and __CALCSETUP publishes the center register to
 *    shared memory followed by __syncthreads().  The barrier is safe because
 *    every branch below depends only on block-uniform values (__c1Id, __h).
 *  - __sbref_wrap is defined elsewhere in this file; presumably it indexes the
 *    shared row relative to __tid -- TODO confirm against its definition.
 *
 * Launch expectations (from the index math): 1-D grid of
 * __side1Num * __side2Num blocks, 1-D blocks of __side2LenOl = 256 threads.
 */
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE 
__side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && 
__local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
/* Pipeline macros: __LOAD streams one c1-row into a register; __CALCk advances
 * stage k by one fused time step (falling back to a pass-through copy outside
 * the stage's valid region); __STORE applies the 8th and final step and writes
 * the destination plane.  __CALCSETUP publishes the center register to the
 * shared row and issues the block-wide __syncthreads(). */
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, 
reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Warm-up prologue: fill the 8-stage register pipeline.  The first c1-tile
 * (__c1Id == 0) seeds every stage from the boundary row __reg_7_0; interior
 * tiles load their own halo rows instead. */
if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, 
__reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, 
__reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); 
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; /* steady state: last c1-tile drains the pipeline in the epilogue cases below; interior tiles stream until the overlap boundary */ if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; 
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); 
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, 
__reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { /* interior c1-tile: stream to the overlap boundary, then flush up to 3 trailing rows */ for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); 
#define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 
3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 
7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } /* kernel0_6: same machine-generated 5-point star stencil as kernel0_7 but fusing 6 time steps per launch (__side0Len = 6, __side2Len = 244; pipeline stages __CALC1..__CALC5 plus the final __STORE application). */ __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const 
AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; /* write only where all 6 halo layers are valid */ AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * 
(__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) /* first c1 tile: prime the 6-deep pipeline; __reg_5_0 (row 0) is re-fed to each new stage as the top boundary value */ if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
__reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); 
__LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { /* interior/last tiles: generic warm-up from the tile's overlapped top rows */ __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; /* last c1 tile: unrolled 3-row streaming loop, then an epilogue chosen by how many rows remain */ if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { /* interior tiles: stream the full overlapped height; trailing early-return steps finish the last rows */ for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
__reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const 
AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const 
AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, 
__reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, 
__reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); 
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, 
__reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / 
__side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, 
__h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const 
AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, 
__a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, 
__reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len 
= (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * 
(__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); 
__h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); 
const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f 
* (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, 
__reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
edc5860ac7ff2bb81910b1ef24d68360bbf68ce8.cu
#include "star2d1r-256-10-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < 
__c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * 
(__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); 
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); 
__LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); 
__CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); 
__LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, 
__reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); 
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, 
__reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, 
__reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, 
__reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad 
+ __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, 
__reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, 
__reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, 
__reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, 
__reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, 
__reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, 
__reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, 
/*
 * NOTE(review): auto-generated AN5D-style temporally-blocked stencil code.
 * This region contains the drain/epilogue of the preceding generated kernel
 * followed by kernel0_8. The code is left byte-identical below; only
 * comments were added, because the register-rotation sequences are
 * machine-generated and order-sensitive (every __CALCk/__STORE argument
 * rotation encodes the pipeline schedule).
 */
__reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } /* kernel0_8: one sweep applying 8 fused time steps (__side0Len = 8) of the
5-point stencil in __CALCEXPR (coefficients 0.1873/0.1876/0.2500/0.1877/0.1874;
up/left/centre/right/down neighbours, left/right via shared memory __SBREF) to a
128 x 240 output tile (__side1Len x __side2Len). A holds two time planes:
__LOAD reads plane (__c0 % 2), __STORE writes plane ((c0 + 1) % 2) via __DEST.
Each __CALCk applies time step k only where __writeValidk holds -- the valid
region shrinks by one halo cell per fused step -- otherwise the centre value
passes through unchanged (out = reg1). Registers __reg_k_{0,1,2} form a
rotating 3-row window per time level; __b_sb_double is a double-buffered
shared-memory row staging the current middle row for neighbour access.
NOTE(review): __CALCk/__STORE expand to a __syncthreads() inside __CALCSETUP;
they appear to be invoked at block-uniform control points (loop bounds depend
only on __h/__side1LenOl, which are uniform), so the barrier should be
non-divergent -- confirm against the generator's contract. */ __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const 
AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < 
__side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define 
__CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) /* Prologue: the first c1 tile (__c1Id == 0) warms up the 8-level pipeline
reusing row 0 (__reg_7_0) as the top neighbour for each level's first step;
other tiles instead load 17 overlap rows and prime every level from them. */ if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, 
__reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, 
__reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } /* Steady state: consume one input row per iteration and emit one output row
8 rows behind (__STORE(__h - 8, ...)); the loop is unrolled by 3 to cycle the
register triples. The last c1 tile (this branch) also drains the pipeline at
the tile edge, reusing the final row as the bottom neighbour per level. */ __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, 
__reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); 
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { /* Interior c1 tiles: fixed trip count over the overlapped extent
__side1LenOl; no edge drain is needed because neighbouring tiles own the
boundary rows. */ for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, 
__reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } /* Remainder of the unroll-by-3 loop: up to three extra rows, each guarded
by a uniform __side1LenOl check. */ if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 
- 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && 
__local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 
7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
__STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } }
/* kernel0_6: same machine-generated stencil pipeline as kernel0_7, but fusing
 * __side0Len = 6 time steps (register stages __CALC1..__CALC5 plus the final
 * __STORE stage, gated by __writeValid1..6 with __storeValid = __writeValid6)
 * and using __side2Len = 244 so the overlapped tile still fits the block.
 * These kernels form a descending family (kernel0_7, kernel0_6, kernel0_5, ...)
 * presumably so the host can pick the fusion depth matching the remaining
 * timestep count — TODO confirm against the dispatch code outside this chunk.
 * NOTE(review): auto-generated — regenerate rather than hand-edit. */
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const
AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * 
(__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
__reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); 
__LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); 
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, 
// NOTE(review): machine-generated stencil code (AN5D-style temporal register pipelining).
// Newlines appear to have been collapsed in this chunk, so ordinary statements and
// `#define` directives share physical lines — confirm formatting against the original
// generator output before compiling. The statements below up to the closing `} }` are
// the drain/epilogue of the PRECEDING kernel; kernel0_5 itself begins at the
// `__global__` token later on this same line.
__reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const
// kernel0_5 tile geometry: __side0Len = 5 fused pipeline stages (matching the
// __writeValid1..5 guard rings declared below), a 128 x 246 spatial tile, halo of 1 in
// each spatial dimension. __c1Id / __c2 locate this block's tile from blockIdx.x; __tid
// indexes this thread's slot in the shared-memory c2 line. Register triples
// __reg_k_{0,1,2} hold three consecutive c1-planes for pipeline stage k;
// __b_sb_double is a double-length shared buffer (halves toggled by __DB_SWITCH below).
// __loadValid / __updateValid / __writeValidK mask off threads in the halo/overlap lanes.
AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const
// Helper macros: __LOAD pulls one c1-plane of A into a register (A holds two
// dimsize x dimsize time slabs selected by (__c0 % 2); __DEST writes the opposite slab).
// __CALCEXPR is the 5-point weighted stencil: centre 0.2500f, c1 neighbours via
// registers (0.1873f / 0.1874f), c2 neighbours via the shared line (0.1876f / 0.1877f).
// __CALCSETUP publishes the centre plane to shared memory and barriers; __CALCk runs
// stage k (passing the value through unchanged for masked lanes), and __STORE performs
// the 5th/final stage, writing directly to the output slab. The block then primes the
// pipeline for the first tile row (__c1Id == 0), where out-of-range upper-halo planes
// are substituted with the boundary plane __reg_4_0.
AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0,
// First-row prologue continues: planes 5..10 are loaded and pushed through stages 1-4,
// emitting the first output planes via __STORE(1..5, ...). The `else` branch is the
// prologue for interior tile rows: identical pipeline fill, but no boundary-plane
// substitution and only one __STORE once all five stages hold valid data.
__reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4);
// Interior-row prologue continues through plane 10. Afterwards the shared pointer is
// reset to the second buffer half and the kernel enters its steady state: if this block
// owns the LAST tile row (__c1Id == __side1Num - 1) it runs a 3x-unrolled
// load/compute/store loop over the remaining planes (output trails input by 5 planes,
// hence __STORE(__h - 5, ...)).
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2,
// Last-row steady loop finishes; the if/else-if ladder below drains the pipeline for the
// 3 possible loop-remainder alignments (__h + 0/1/2 == end of this tile's c1 extent),
// reusing the final boundary plane for the missing lower-halo inputs of each stage.
__reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0);
// Remainder case (__h + 2): two extra planes loaded before draining. The final `else`
// branch is the steady-state loop for INTERIOR tile rows: same 3x-unrolled body, but it
// iterates to the fixed overlapped extent __side1LenOl and exits via the early-return
// drain sequence (continued on the next physical line of the file).
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2,
// NOTE(review): the statements below up to the closing `} }` are the early-return drain
// epilogue of the preceding kernel (kernel0_5); kernel0_4 begins at the `__global__`
// token later on this line. kernel0_4 fuses __side0Len = 4 time steps over a 128 x 248
// tile (same A layout and stencil weights; one fewer pipeline stage than kernel0_5).
// Newlines appear collapsed in this chunk — `#define`s share lines with code; confirm
// against the original generated file.
__reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) /
// Per-thread coordinates, 4 stages' worth of register planes (__reg_0..3_*), the
// double-buffered shared c2 line, and halo/validity guards — same scheme as kernel0_5
// but with __writeValid1..4 only.
__side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ?
// Stage macros __CALC1..3 plus __STORE (which runs the 4th/final stage and writes to the
// opposite time slab of A, guarded by __storeValid = __writeValid4). Then the prologue:
// first tile row (__c1Id == 0) substitutes the boundary plane __reg_3_0 for the missing
// upper halo and emits __STORE(1..4, ...) as the pipeline fills.
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0,
// First-row prologue ends (__STORE(4, ...)); `else` is the interior-row prologue (fill
// only, single __STORE(4, ...) once all stages are valid). Afterwards the shared pointer
// is rebased and, for the LAST tile row, the 3x-unrolled steady loop runs with output
// trailing input by 4 planes (__STORE(__h - 4, ...)).
__reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
// Last-row drain: the if/else-if ladder covers the 3 loop-remainder alignments
// (__h + 0/1/2 == end of this tile's c1 extent), reusing the final plane for the
// missing lower-halo inputs; the trailing `else` begins the interior-row steady loop
// (continued on the next physical line of the file).
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0,
// NOTE(review): the statements below up to the closing `} }` finish the preceding kernel
// (kernel0_4): its interior-row steady loop and early-return drain. kernel0_3 begins at
// the `__global__` token later on this line; it fuses __side0Len = 3 time steps over a
// 128 x 250 tile using the same data layout, stencil weights, and pipelining scheme.
// Newlines appear collapsed in this chunk — `#define`s share lines with code; confirm
// against the original generated file.
__h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const
// kernel0_3 geometry and per-thread state: 3 write-validity rings (__writeValid1..3,
// __storeValid = __writeValid3), register planes for 3 stages, the double-buffered
// shared c2 line, and the same halo/validity guards as the wider kernels above.
AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0,
// Stencil expression (same weights as the other kernels), stage macros __CALC1..2 plus
// __STORE (final stage, guarded by __storeValid = __writeValid3), then the prologues:
// first tile row substitutes the boundary plane __reg_2_0 for the missing upper halo
// and emits __STORE(1..3, ...); the `else` branch fills the pipeline for interior rows.
__a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2,
// Interior-row prologue ends with __STORE(3, ...); then the steady state: a 3x-unrolled
// loop with output trailing input by 3 planes (__STORE(__h - 3, ...)), the last-row
// drain ladder for the 3 remainder alignments, and the start of the interior-row loop
// (its early-return drain continues on the next physical line of the file).
__reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1,
__reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len 
= (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * 
(__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f * (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); 
__h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); 
const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((0.1873f * (__REGREF(__a, 0))) + (0.1876f * (__SBREF(__b_sb, -1)))) + (0.2500f * (__REGREF(__b, 0)))) + (0.1877f 
* (__SBREF(__b_sb, 1)))) + (0.1874f * (__REGREF(__c, 0)))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, 
__reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
51f3a6090edf931182cebcf9d1c786b06cf2aa00.hip
// !!! This is a file automatically generated by hipify!!! /** * Yuri Gorokhov * lab 2 - Conditional statements vs without */ #include <stdio.h> #include <hip/hip_runtime.h> #include "../include/cuda_util.h" #define ITERATIONS 10000000 #define NUM_THREADS 256 __global__ void kernel_with_conditionals(); __global__ void kernel_without_conditionals(); int main() { hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); // with conditionals hipEventRecord(start,0); hipLaunchKernelGGL(( kernel_with_conditionals), dim3(1), dim3(NUM_THREADS), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("Time with conditionals: %f\n", elapsedTime); // without conditionals hipEventRecord(start,0); hipLaunchKernelGGL(( kernel_without_conditionals), dim3(1),dim3(NUM_THREADS), 0, 0, ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("Time without conditionals: %f\n", elapsedTime); return 0; } __global__ void kernel_with_conditionals() { int temp = 0; for(int i=0; i < ITERATIONS; i++) { if(threadIdx.x % 2 == 0) temp += 1; else temp -= 1; } __syncthreads(); } __global__ void kernel_without_conditionals() { int temp = 0; for(int i=0; i < ITERATIONS; i++) { temp += (-threadIdx.x%2) + (1 - threadIdx.x%2); } __syncthreads(); }
51f3a6090edf931182cebcf9d1c786b06cf2aa00.cu
/** * Yuri Gorokhov * lab 2 - Conditional statements vs without */ #include <stdio.h> #include <cuda.h> #include "../include/cuda_util.h" #define ITERATIONS 10000000 #define NUM_THREADS 256 __global__ void kernel_with_conditionals(); __global__ void kernel_without_conditionals(); int main() { cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); // with conditionals cudaEventRecord(start,0); kernel_with_conditionals<<<1, NUM_THREADS>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time with conditionals: %f\n", elapsedTime); // without conditionals cudaEventRecord(start,0); kernel_without_conditionals<<<1,NUM_THREADS>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time without conditionals: %f\n", elapsedTime); return 0; } __global__ void kernel_with_conditionals() { int temp = 0; for(int i=0; i < ITERATIONS; i++) { if(threadIdx.x % 2 == 0) temp += 1; else temp -= 1; } __syncthreads(); } __global__ void kernel_without_conditionals() { int temp = 0; for(int i=0; i < ITERATIONS; i++) { temp += (-threadIdx.x%2) + (1 - threadIdx.x%2); } __syncthreads(); }
d846a55dfb19958d70b4834588a3eb3a92412b1b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <iostream> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> using namespace std; int main() { thrust::host_vector<int> h_vec(24); thrust::generate(h_vec.begin(), h_vec.end(), rand); thrust::device_vector<int> d_vec = h_vec; thrust::sort(d_vec.begin(), d_vec.end()); thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); return 0; }
d846a55dfb19958d70b4834588a3eb3a92412b1b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <iostream> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> using namespace std; int main() { thrust::host_vector<int> h_vec(24); thrust::generate(h_vec.begin(), h_vec.end(), rand); thrust::device_vector<int> d_vec = h_vec; thrust::sort(d_vec.begin(), d_vec.end()); thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); return 0; }
e7b255aed785c661cb40bcd503aa5dbfd26f7ae0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef GPUDB_ACCESS_METHOD #define GPUDB_ACCESS_METHOD #include "hashtable.cu" #include "GPU_Dll.h" /* This file implements the tree searches and the hash probes. */ int tree_search(Record *d_R, int rLen, CUDA_CSSTree *tree, Record *d_S, int sLen, Record** d_Rout) { unsigned int timer=0; int* d_locations; // Location array on device GPUMALLOC((void**) &d_locations, sizeof(int) * sLen); startTimer(&timer); cuda_search_index(tree->data, tree->nDataNodes, tree->dir, tree->nDirNodes, d_S, d_locations, sLen); endTimer("cuda_search_index_usingKeys", &timer); //Record* d_Result; startTimer(&timer); int result = cuda_join_after_search((Record*)tree->data, rLen, d_S, d_locations, sLen, d_Rout); endTimer("cuda_join_after_search", &timer); hipDeviceSynchronize(); /*startTimer(&timer); copyBackToHost(d_Result, (void**)Rout, sizeof(Record) * result, 1, 1); endTimer("copy back", &timer);*/ GPUFREE(d_locations); return result; } extern "C" int GPUOnly_TreeSearch( Record* d_Rin, int rLen, CUDA_CSSTree* tree, Record* d_Sin, int sLen, Record** d_Rout ) { return tree_search(d_Rin, rLen, tree, d_Sin, sLen, d_Rout); } extern "C" int GPUCopy_TreeSearch( Record* h_Rin, int rLen, CUDA_CSSTree* tree, Record* h_Sin, int sLen, Record** h_Rout ) { Record* d_Rin; Record* d_Sin; Record* d_Rout; GPUMALLOC( (void**)&d_Rin, sizeof(Record)*rLen ); GPUMALLOC( (void**)&d_Sin, sizeof(Record)*sLen ); TOGPU( d_Rin, h_Rin, sizeof(Record)*rLen ); TOGPU( d_Sin, h_Sin, sizeof(Record)*sLen ); int outSize = tree_search(d_Rin, rLen, tree, d_Sin, sLen, &d_Rout); //*h_Rout = (Record*)malloc( sizeof(Record)*outSize ); CPUMALLOC( (void**)&(*h_Rout), sizeof(Record)*outSize ); FROMGPU( *h_Rout, d_Rout, sizeof(Record)*outSize ); GPUFREE( d_Rin ); GPUFREE( d_Sin ); GPUFREE( d_Rout ); return outSize; } int hashSearch(Record *d_R, int rLen, Bound* d_bound, int* d_keys, int sLen, Record** d_result, int numThreadsPerBlock = 512) { 
unsigned int timer=0; int *d_oSize; GPUMALLOC( (void**) & d_oSize, sLen*sizeof(int) ); Bound *d_oBound; GPUMALLOC( (void**) & d_oBound, sLen*sizeof(Record) ); int *d_sum; GPUMALLOC( (void**) & d_sum, sLen*sizeof(int) ); //int numThreadsPerBlock=512;//also the block size int blockSize=numThreadsPerBlock*2; int numBlock_x=sLen/blockSize; if(rLen%blockSize!=0) numBlock_x++; dim3 thread( numThreadsPerBlock, 1, 1); dim3 grid( numBlock_x, 1 , 1); int from, to; int numRun=16;//16 is the best. int runSize=rLen/numRun; printf("sLen, %d, numRun, %d\n", sLen, numRun); startTimer(&timer); for(int i=0;i<numRun;i++) { from=i*runSize; to=from + runSize; hipLaunchKernelGGL(( optProbe_kernel), dim3(grid), dim3(thread), 0, 0, d_bound,rLen,d_keys, d_oSize, d_oBound, blockSize, sLen, from, to); } endTimer("probe",&timer); startTimer(&timer); //saven_initialPrefixSum(sLen); scanImpl(d_oSize,sLen,d_sum); int *h_last; CPUMALLOC((void**) &h_last, sizeof(int)); FROMGPU(h_last, (d_oSize+sLen-1), sizeof(int)); int *h_lastSum; CPUMALLOC((void**) &h_lastSum, sizeof(int)); FROMGPU(h_lastSum, (d_sum+sLen-1), sizeof(int)); int total=*h_last+*h_lastSum; int *d_loc; GPUMALLOC( (void**) & d_loc, total*sizeof(int) ); GPUMALLOC( (void**) &(*d_result), total*sizeof(Record) ); numRun=16; printf("total result, %d, numRun II, %d\n", total, numRun); thread.x=256; grid.x=256; grid.y=sLen/grid.x/thread.x; hipLaunchKernelGGL(( location_kernel), dim3(grid), dim3(thread), 0, 0, d_loc, d_sum, d_oBound); endTimer("locations",&timer); startTimer(&timer); numThreadsPerBlock=512;//also the block size thread.x=numThreadsPerBlock; blockSize=numThreadsPerBlock*2; grid.x=128; grid.y=total/grid.x/numThreadsPerBlock; numRun=8;//16 is the best. 
runSize=rLen/numRun; printf("total II, %d, numRun, %d, grid.y, %d\n", total, numRun,grid.y); for(int i=0;i<numRun;i++) { from=i*runSize; to=from + runSize; hipLaunchKernelGGL(( optFetch_kernel), dim3(grid), dim3(thread), 0, 0, d_R, *d_result,d_loc, blockSize, total, from, to); } endTimer("fetch",&timer); GPUFREE(d_loc); GPUFREE(d_oSize); GPUFREE(d_oBound); GPUFREE(d_sum); return total; } extern "C" int GPUOnly_HashSearch( Record* d_R, int rLen, Bound* d_bound, int* d_keys, int sLen, Record** d_result, int numThread ) { return hashSearch(d_R, rLen, d_bound, d_keys, sLen, d_result, numThread); } extern "C" int GPUCopy_HashSearch( Record* h_R, int rLen, Bound* h_bound, int inBits, Record* h_S, int sLen, Record** h_Rout, int numThread ) { int* h_keys = (int*)malloc( sizeof(int)*sLen ); for( int i = 0; i < sLen; i++ ) { h_keys[i] = h_S[i].y; } int boundLen = TwoPowerN(inBits); Record* d_R; Bound* d_bound; int* d_keys; Record* d_Rout; GPUMALLOC( (void**)&d_R, sizeof(Record)*rLen ); GPUMALLOC( (void**)&d_bound, sizeof(Bound)*boundLen ); GPUMALLOC( (void**)&d_keys, sizeof(int)*sLen ); TOGPU( d_R, h_R, sizeof(Record)*rLen ); TOGPU( d_bound, h_bound, sizeof(Bound)*boundLen ); TOGPU( d_keys, h_keys, sizeof(int)*sLen ); int outSize = hashSearch(d_R, rLen, d_bound, d_keys, sLen, &d_Rout, numThread); //*h_Rout = (Record*)malloc( sizeof(Record)*outSize ); CPUMALLOC( (void**)&(*h_Rout), sizeof(Record)*outSize ); FROMGPU( *h_Rout, d_Rout, sizeof(Record)*outSize ); GPUFREE( d_R ); GPUFREE( d_bound ); GPUFREE( d_keys ); GPUFREE( d_Rout ); return outSize; } //write your testing code here. 
void testTreeSearch(int rLen, int sLen) { int result=0; int memSizeR=sizeof(Record)*rLen; int memSizeS=sizeof(Record)*sLen; Record *h_R; CPUMALLOC((void**)&h_R, memSizeR); generateSort(h_R, TEST_MAX,rLen,0); CUDA_CSSTree* tree; unsigned int timer=0; Record *h_S; CPUMALLOC((void**)&h_S, memSizeS); generateRand(h_S, TEST_MAX,sLen,1); Record *d_Rout; startTime(); startTimer(&timer); Record *d_R; GPUMALLOC((void**) & d_R, memSizeR); TOGPU(d_R, h_R, memSizeR); endTimer("copy R to GPU",&timer); startTimer(&timer); gpu_constructCSSTree(d_R, rLen, &tree); endTimer("tree construction", &timer); startTimer(&timer); Record *d_S; GPUMALLOC((void**) & d_S, sLen*sizeof(Record) ); TOGPU(d_S, h_S, memSizeS); endTimer("copy to GPU",&timer); //ninlj startTimer(&timer); result=tree_search(d_R,rLen,tree,d_S,sLen,&d_Rout); double processingTime=endTimer("tree search",&timer); startTimer(&timer); Record *h_result; CPUMALLOC((void**)&h_result, sizeof(Record)*result); FROMGPU(h_result, d_Rout, sizeof(Record)*result); endTimer("copy back", &timer); double sec=endTime("tree search"); //gpuPrint(d_Rout, rLen, "d_Rout"); printf("rLen, %d, sLen, %d, result, %d\n", rLen, sLen, result); CPUFREE(d_Rout); CPUFREE(h_R); CPUFREE(h_S); GPUFREE(d_R); GPUFREE(d_S); } void testHashSearch(int rLen, int sLen) { Record* h_R=NULL; CPUMALLOC((void**)&h_R, sizeof(Record)*rLen); generateSort(h_R, TEST_MAX, rLen, 0); Bound *h_bound=NULL; double bits=log2((double)rLen); int intBits=(int)bits; if(bits-intBits>=0.0000001) intBits++; intBits=intBits-1;//each bucket 8 tuples. int listLen=(1<<intBits); CPUMALLOC((void**)&h_bound, sizeof(Record)*listLen); buildHashTable(h_R, rLen,intBits,h_bound); Record *h_S; CPUMALLOC((void**)&h_S, sizeof(Record)*sLen); generateRand(h_S, TEST_MAX,sLen,1); //extract the keys. 
int* h_SKeys; CPUMALLOC((void**)&h_SKeys, sLen*sizeof(int)); int i=0; for(i=0;i<sLen;i++) { h_SKeys[i]=h_S[i].y; } CPUFREE(h_S); //device memory unsigned int timer=0; startTime(); startTimer(&timer); Record **h_Rout; CPUMALLOC((void**)&h_Rout,sizeof(Record*)); int *d_keys; GPUMALLOC( (void**) & d_keys, sLen*sizeof(int) ); TOGPU( d_keys, h_SKeys, sLen*sizeof(int)); Record *d_R; GPUMALLOC( (void**) & d_R, rLen*sizeof(Record) ); TOGPU( d_R, h_R, rLen*sizeof(Record)); Bound *d_bound; GPUMALLOC( (void**) & d_bound, listLen*sizeof(Record) ); TOGPU( d_bound, h_bound, listLen*sizeof(Record)); endTimer("copy to GPU",&timer); startTimer(&timer); hashSearch(d_R, rLen, d_bound, d_keys, sLen, h_Rout); endTimer("hash search",&timer); double sec=endTime("hash search"); CPUFREE(h_Rout); GPUFREE(d_bound); GPUFREE(d_R); GPUFREE(d_keys); } void test_AccessMethods(int argc, char **argv) { int i=0; for(i=0;i<argc;i++) { if(strcmp(argv[i],"-TreeSearch")==0) { int rLen=8*1024*1024; int sLen=8*1024*1024; if(argc==(i+3)) { rLen=atoi(argv[i+1])*1024; sLen=atoi(argv[i+2])*1024; } testTreeSearch(rLen,sLen); } if(strcmp(argv[i],"-HashSearch")==0) { int rLen=8*1024*1024; int sLen=8*1024*1024; if(argc==(i+3)) { rLen=atoi(argv[i+1])*1024; sLen=atoi(argv[i+2])*1024; } testHashSearch(rLen,sLen); } } } #endif
e7b255aed785c661cb40bcd503aa5dbfd26f7ae0.cu
#ifndef GPUDB_ACCESS_METHOD #define GPUDB_ACCESS_METHOD #include "hashtable.cu" #include "GPU_Dll.h" /* This file implements the tree searches and the hash probes. */ int tree_search(Record *d_R, int rLen, CUDA_CSSTree *tree, Record *d_S, int sLen, Record** d_Rout) { unsigned int timer=0; int* d_locations; // Location array on device GPUMALLOC((void**) &d_locations, sizeof(int) * sLen); startTimer(&timer); cuda_search_index(tree->data, tree->nDataNodes, tree->dir, tree->nDirNodes, d_S, d_locations, sLen); endTimer("cuda_search_index_usingKeys", &timer); //Record* d_Result; startTimer(&timer); int result = cuda_join_after_search((Record*)tree->data, rLen, d_S, d_locations, sLen, d_Rout); endTimer("cuda_join_after_search", &timer); cudaThreadSynchronize(); /*startTimer(&timer); copyBackToHost(d_Result, (void**)Rout, sizeof(Record) * result, 1, 1); endTimer("copy back", &timer);*/ GPUFREE(d_locations); return result; } extern "C" int GPUOnly_TreeSearch( Record* d_Rin, int rLen, CUDA_CSSTree* tree, Record* d_Sin, int sLen, Record** d_Rout ) { return tree_search(d_Rin, rLen, tree, d_Sin, sLen, d_Rout); } extern "C" int GPUCopy_TreeSearch( Record* h_Rin, int rLen, CUDA_CSSTree* tree, Record* h_Sin, int sLen, Record** h_Rout ) { Record* d_Rin; Record* d_Sin; Record* d_Rout; GPUMALLOC( (void**)&d_Rin, sizeof(Record)*rLen ); GPUMALLOC( (void**)&d_Sin, sizeof(Record)*sLen ); TOGPU( d_Rin, h_Rin, sizeof(Record)*rLen ); TOGPU( d_Sin, h_Sin, sizeof(Record)*sLen ); int outSize = tree_search(d_Rin, rLen, tree, d_Sin, sLen, &d_Rout); //*h_Rout = (Record*)malloc( sizeof(Record)*outSize ); CPUMALLOC( (void**)&(*h_Rout), sizeof(Record)*outSize ); FROMGPU( *h_Rout, d_Rout, sizeof(Record)*outSize ); GPUFREE( d_Rin ); GPUFREE( d_Sin ); GPUFREE( d_Rout ); return outSize; } int hashSearch(Record *d_R, int rLen, Bound* d_bound, int* d_keys, int sLen, Record** d_result, int numThreadsPerBlock = 512) { unsigned int timer=0; int *d_oSize; GPUMALLOC( (void**) & d_oSize, sLen*sizeof(int) ); 
Bound *d_oBound; GPUMALLOC( (void**) & d_oBound, sLen*sizeof(Record) ); int *d_sum; GPUMALLOC( (void**) & d_sum, sLen*sizeof(int) ); //int numThreadsPerBlock=512;//also the block size int blockSize=numThreadsPerBlock*2; int numBlock_x=sLen/blockSize; if(rLen%blockSize!=0) numBlock_x++; dim3 thread( numThreadsPerBlock, 1, 1); dim3 grid( numBlock_x, 1 , 1); int from, to; int numRun=16;//16 is the best. int runSize=rLen/numRun; printf("sLen, %d, numRun, %d\n", sLen, numRun); startTimer(&timer); for(int i=0;i<numRun;i++) { from=i*runSize; to=from + runSize; optProbe_kernel<<<grid, thread>>>(d_bound,rLen,d_keys, d_oSize, d_oBound, blockSize, sLen, from, to); } endTimer("probe",&timer); startTimer(&timer); //saven_initialPrefixSum(sLen); scanImpl(d_oSize,sLen,d_sum); int *h_last; CPUMALLOC((void**) &h_last, sizeof(int)); FROMGPU(h_last, (d_oSize+sLen-1), sizeof(int)); int *h_lastSum; CPUMALLOC((void**) &h_lastSum, sizeof(int)); FROMGPU(h_lastSum, (d_sum+sLen-1), sizeof(int)); int total=*h_last+*h_lastSum; int *d_loc; GPUMALLOC( (void**) & d_loc, total*sizeof(int) ); GPUMALLOC( (void**) &(*d_result), total*sizeof(Record) ); numRun=16; printf("total result, %d, numRun II, %d\n", total, numRun); thread.x=256; grid.x=256; grid.y=sLen/grid.x/thread.x; location_kernel<<<grid, thread>>>(d_loc, d_sum, d_oBound); endTimer("locations",&timer); startTimer(&timer); numThreadsPerBlock=512;//also the block size thread.x=numThreadsPerBlock; blockSize=numThreadsPerBlock*2; grid.x=128; grid.y=total/grid.x/numThreadsPerBlock; numRun=8;//16 is the best. 
runSize=rLen/numRun; printf("total II, %d, numRun, %d, grid.y, %d\n", total, numRun,grid.y); for(int i=0;i<numRun;i++) { from=i*runSize; to=from + runSize; optFetch_kernel<<<grid, thread>>>(d_R, *d_result,d_loc, blockSize, total, from, to); } endTimer("fetch",&timer); GPUFREE(d_loc); GPUFREE(d_oSize); GPUFREE(d_oBound); GPUFREE(d_sum); return total; } extern "C" int GPUOnly_HashSearch( Record* d_R, int rLen, Bound* d_bound, int* d_keys, int sLen, Record** d_result, int numThread ) { return hashSearch(d_R, rLen, d_bound, d_keys, sLen, d_result, numThread); } extern "C" int GPUCopy_HashSearch( Record* h_R, int rLen, Bound* h_bound, int inBits, Record* h_S, int sLen, Record** h_Rout, int numThread ) { int* h_keys = (int*)malloc( sizeof(int)*sLen ); for( int i = 0; i < sLen; i++ ) { h_keys[i] = h_S[i].y; } int boundLen = TwoPowerN(inBits); Record* d_R; Bound* d_bound; int* d_keys; Record* d_Rout; GPUMALLOC( (void**)&d_R, sizeof(Record)*rLen ); GPUMALLOC( (void**)&d_bound, sizeof(Bound)*boundLen ); GPUMALLOC( (void**)&d_keys, sizeof(int)*sLen ); TOGPU( d_R, h_R, sizeof(Record)*rLen ); TOGPU( d_bound, h_bound, sizeof(Bound)*boundLen ); TOGPU( d_keys, h_keys, sizeof(int)*sLen ); int outSize = hashSearch(d_R, rLen, d_bound, d_keys, sLen, &d_Rout, numThread); //*h_Rout = (Record*)malloc( sizeof(Record)*outSize ); CPUMALLOC( (void**)&(*h_Rout), sizeof(Record)*outSize ); FROMGPU( *h_Rout, d_Rout, sizeof(Record)*outSize ); GPUFREE( d_R ); GPUFREE( d_bound ); GPUFREE( d_keys ); GPUFREE( d_Rout ); return outSize; } //write your testing code here. 
void testTreeSearch(int rLen, int sLen) { int result=0; int memSizeR=sizeof(Record)*rLen; int memSizeS=sizeof(Record)*sLen; Record *h_R; CPUMALLOC((void**)&h_R, memSizeR); generateSort(h_R, TEST_MAX,rLen,0); CUDA_CSSTree* tree; unsigned int timer=0; Record *h_S; CPUMALLOC((void**)&h_S, memSizeS); generateRand(h_S, TEST_MAX,sLen,1); Record *d_Rout; startTime(); startTimer(&timer); Record *d_R; GPUMALLOC((void**) & d_R, memSizeR); TOGPU(d_R, h_R, memSizeR); endTimer("copy R to GPU",&timer); startTimer(&timer); gpu_constructCSSTree(d_R, rLen, &tree); endTimer("tree construction", &timer); startTimer(&timer); Record *d_S; GPUMALLOC((void**) & d_S, sLen*sizeof(Record) ); TOGPU(d_S, h_S, memSizeS); endTimer("copy to GPU",&timer); //ninlj startTimer(&timer); result=tree_search(d_R,rLen,tree,d_S,sLen,&d_Rout); double processingTime=endTimer("tree search",&timer); startTimer(&timer); Record *h_result; CPUMALLOC((void**)&h_result, sizeof(Record)*result); FROMGPU(h_result, d_Rout, sizeof(Record)*result); endTimer("copy back", &timer); double sec=endTime("tree search"); //gpuPrint(d_Rout, rLen, "d_Rout"); printf("rLen, %d, sLen, %d, result, %d\n", rLen, sLen, result); CPUFREE(d_Rout); CPUFREE(h_R); CPUFREE(h_S); GPUFREE(d_R); GPUFREE(d_S); } void testHashSearch(int rLen, int sLen) { Record* h_R=NULL; CPUMALLOC((void**)&h_R, sizeof(Record)*rLen); generateSort(h_R, TEST_MAX, rLen, 0); Bound *h_bound=NULL; double bits=log2((double)rLen); int intBits=(int)bits; if(bits-intBits>=0.0000001) intBits++; intBits=intBits-1;//each bucket 8 tuples. int listLen=(1<<intBits); CPUMALLOC((void**)&h_bound, sizeof(Record)*listLen); buildHashTable(h_R, rLen,intBits,h_bound); Record *h_S; CPUMALLOC((void**)&h_S, sizeof(Record)*sLen); generateRand(h_S, TEST_MAX,sLen,1); //extract the keys. 
int* h_SKeys; CPUMALLOC((void**)&h_SKeys, sLen*sizeof(int)); int i=0; for(i=0;i<sLen;i++) { h_SKeys[i]=h_S[i].y; } CPUFREE(h_S); //device memory unsigned int timer=0; startTime(); startTimer(&timer); Record **h_Rout; CPUMALLOC((void**)&h_Rout,sizeof(Record*)); int *d_keys; GPUMALLOC( (void**) & d_keys, sLen*sizeof(int) ); TOGPU( d_keys, h_SKeys, sLen*sizeof(int)); Record *d_R; GPUMALLOC( (void**) & d_R, rLen*sizeof(Record) ); TOGPU( d_R, h_R, rLen*sizeof(Record)); Bound *d_bound; GPUMALLOC( (void**) & d_bound, listLen*sizeof(Record) ); TOGPU( d_bound, h_bound, listLen*sizeof(Record)); endTimer("copy to GPU",&timer); startTimer(&timer); hashSearch(d_R, rLen, d_bound, d_keys, sLen, h_Rout); endTimer("hash search",&timer); double sec=endTime("hash search"); CPUFREE(h_Rout); GPUFREE(d_bound); GPUFREE(d_R); GPUFREE(d_keys); } void test_AccessMethods(int argc, char **argv) { int i=0; for(i=0;i<argc;i++) { if(strcmp(argv[i],"-TreeSearch")==0) { int rLen=8*1024*1024; int sLen=8*1024*1024; if(argc==(i+3)) { rLen=atoi(argv[i+1])*1024; sLen=atoi(argv[i+2])*1024; } testTreeSearch(rLen,sLen); } if(strcmp(argv[i],"-HashSearch")==0) { int rLen=8*1024*1024; int sLen=8*1024*1024; if(argc==(i+3)) { rLen=atoi(argv[i+1])*1024; sLen=atoi(argv[i+2])*1024; } testHashSearch(rLen,sLen); } } } #endif
3f778d056982f7d0f0b14e24144ef2e189b0ead1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/logspace_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/data_type_transform.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T> __global__ void LogspaceKernelInner( T start, T stop, double step, T base, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(pow(static_cast<double>(base), static_cast<double>(start + step * index))); } else { out[index] = static_cast<T>( pow(static_cast<double>(base), static_cast<double>(stop - step * (size - index - 1)))); } } } template <typename T> __global__ void LogspaceSpecialKernel(T start, T base, T* out) { out[0] = static_cast<T>( pow(static_cast<double>(base), static_cast<double>(start))); } template <typename T, typename Context> void LogspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, const DenseTensor& base, DataType dtype, DenseTensor* out) { auto start_t = phi::funcs::TransDataType(ctx, start, dtype); auto stop_t = 
phi::funcs::TransDataType(ctx, stop, dtype); auto base_t = phi::funcs::TransDataType(ctx, base, dtype); DenseTensor n_start; DenseTensor n_stop; DenseTensor n_num; DenseTensor n_base; phi::Copy(ctx, start_t, phi::CPUPlace(), false, &n_start); T start_data = n_start.data<T>()[0]; phi::Copy(ctx, stop_t, phi::CPUPlace(), false, &n_stop); T stop_data = n_stop.data<T>()[0]; phi::Copy(ctx, number, phi::CPUPlace(), false, &n_num); int64_t num = static_cast<int64_t>(n_num.data<int32_t>()[0]); phi::Copy(ctx, base_t, phi::CPUPlace(), false, &n_base); T base_data = n_base.data<T>()[0]; PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of logspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); double step = 0; auto stream = ctx.stream(); int block = 512; int grid = (num + block - 1) / block; if (num != 1) { step = (static_cast<double>(stop_data - start_data)) / (num - 1); hipLaunchKernelGGL(( LogspaceKernelInner<T>), dim3(grid), dim3(block), 0, stream, start_data, stop_data, step, base_data, num, out_data); } else { hipLaunchKernelGGL(( LogspaceSpecialKernel<T>), dim3(grid), dim3(block), 0, stream, start_data, base_data, out_data); } } } // namespace phi PD_REGISTER_KERNEL(logspace, GPU, ALL_LAYOUT, phi::LogspaceKernel, float, int32_t, int64_t, double) {}
3f778d056982f7d0f0b14e24144ef2e189b0ead1.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/logspace_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/data_type_transform.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T> __global__ void LogspaceKernelInner( T start, T stop, double step, T base, int64_t size, T* out) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < size; index += blockDim.x * gridDim.x) { if (index < size / 2) { out[index] = static_cast<T>(pow(static_cast<double>(base), static_cast<double>(start + step * index))); } else { out[index] = static_cast<T>( pow(static_cast<double>(base), static_cast<double>(stop - step * (size - index - 1)))); } } } template <typename T> __global__ void LogspaceSpecialKernel(T start, T base, T* out) { out[0] = static_cast<T>( pow(static_cast<double>(base), static_cast<double>(start))); } template <typename T, typename Context> void LogspaceKernel(const Context& ctx, const DenseTensor& start, const DenseTensor& stop, const DenseTensor& number, const DenseTensor& base, DataType dtype, DenseTensor* out) { auto start_t = phi::funcs::TransDataType(ctx, start, dtype); auto stop_t = phi::funcs::TransDataType(ctx, stop, dtype); auto base_t = phi::funcs::TransDataType(ctx, base, dtype); 
DenseTensor n_start; DenseTensor n_stop; DenseTensor n_num; DenseTensor n_base; phi::Copy(ctx, start_t, phi::CPUPlace(), false, &n_start); T start_data = n_start.data<T>()[0]; phi::Copy(ctx, stop_t, phi::CPUPlace(), false, &n_stop); T stop_data = n_stop.data<T>()[0]; phi::Copy(ctx, number, phi::CPUPlace(), false, &n_num); int64_t num = static_cast<int64_t>(n_num.data<int32_t>()[0]); phi::Copy(ctx, base_t, phi::CPUPlace(), false, &n_base); T base_data = n_base.data<T>()[0]; PADDLE_ENFORCE_GT( num, 0, phi::errors::InvalidArgument("The num of logspace op should be larger " "than 0, but received num is %d", num)); out->Resize(phi::make_ddim({num})); T* out_data = ctx.template Alloc<T>(out); double step = 0; auto stream = ctx.stream(); int block = 512; int grid = (num + block - 1) / block; if (num != 1) { step = (static_cast<double>(stop_data - start_data)) / (num - 1); LogspaceKernelInner<T><<<grid, block, 0, stream>>>( start_data, stop_data, step, base_data, num, out_data); } else { LogspaceSpecialKernel<T><<<grid, block, 0, stream>>>( start_data, base_data, out_data); } } } // namespace phi PD_REGISTER_KERNEL(logspace, GPU, ALL_LAYOUT, phi::LogspaceKernel, float, int32_t, int64_t, double) {}
65766b67b26e9332d2d525594dcb98bbd451c10e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_fr1 [8][2]; static int dims_update_halo_kernel1_fr1_h [8][2] = {0}; //user function __device__ inline void update_halo_kernel1_fr1_gpu(ACC<double> &density0, ACC<double> &density1, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &pressure, ACC<double> &viscosity, ACC<double> &soundspeed, const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,-1); if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,-1); if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,-1); if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,-1); if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,-1); if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,-1); if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,-1); } __global__ void ops_update_halo_kernel1_fr1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[0][0] * dims_update_halo_kernel1_fr1[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[1][0] * dims_update_halo_kernel1_fr1[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[2][0] * dims_update_halo_kernel1_fr1[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[3][0] + idx_z * 1*1 * 
dims_update_halo_kernel1_fr1[3][0] * dims_update_halo_kernel1_fr1[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[4][0] * dims_update_halo_kernel1_fr1[4][1]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[5][0] * dims_update_halo_kernel1_fr1[5][1]; arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[6][0] * dims_update_halo_kernel1_fr1[6][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel1_fr1[0][0], dims_update_halo_kernel1_fr1[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel1_fr1[1][0], dims_update_halo_kernel1_fr1[1][1], arg1); ACC<double> argp2(dims_update_halo_kernel1_fr1[2][0], dims_update_halo_kernel1_fr1[2][1], arg2); ACC<double> argp3(dims_update_halo_kernel1_fr1[3][0], dims_update_halo_kernel1_fr1[3][1], arg3); ACC<double> argp4(dims_update_halo_kernel1_fr1[4][0], dims_update_halo_kernel1_fr1[4][1], arg4); ACC<double> argp5(dims_update_halo_kernel1_fr1[5][0], dims_update_halo_kernel1_fr1[5][1], arg5); ACC<double> argp6(dims_update_halo_kernel1_fr1[6][0], dims_update_halo_kernel1_fr1[6][1], arg6); update_halo_kernel1_fr1_gpu(argp0, argp1, argp2, argp3, argp4, argp5, argp6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_fr1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg 
arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,23)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(23,"update_halo_kernel1_fr1"); OPS_kernels[23].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != dims_update_halo_kernel1_fr1_h[0][0] || ydim0 != dims_update_halo_kernel1_fr1_h[0][1] || xdim1 != dims_update_halo_kernel1_fr1_h[1][0] || ydim1 != dims_update_halo_kernel1_fr1_h[1][1] || xdim2 != dims_update_halo_kernel1_fr1_h[2][0] || ydim2 != dims_update_halo_kernel1_fr1_h[2][1] || xdim3 != dims_update_halo_kernel1_fr1_h[3][0] || ydim3 != dims_update_halo_kernel1_fr1_h[3][1] || xdim4 != dims_update_halo_kernel1_fr1_h[4][0] || ydim4 != dims_update_halo_kernel1_fr1_h[4][1] || xdim5 != dims_update_halo_kernel1_fr1_h[5][0] || ydim5 != dims_update_halo_kernel1_fr1_h[5][1] || xdim6 != dims_update_halo_kernel1_fr1_h[6][0] || ydim6 != dims_update_halo_kernel1_fr1_h[6][1]) { dims_update_halo_kernel1_fr1_h[0][0] = xdim0; 
dims_update_halo_kernel1_fr1_h[0][1] = ydim0; dims_update_halo_kernel1_fr1_h[1][0] = xdim1; dims_update_halo_kernel1_fr1_h[1][1] = ydim1; dims_update_halo_kernel1_fr1_h[2][0] = xdim2; dims_update_halo_kernel1_fr1_h[2][1] = ydim2; dims_update_halo_kernel1_fr1_h[3][0] = xdim3; dims_update_halo_kernel1_fr1_h[3][1] = ydim3; dims_update_halo_kernel1_fr1_h[4][0] = xdim4; dims_update_halo_kernel1_fr1_h[4][1] = ydim4; dims_update_halo_kernel1_fr1_h[5][0] = xdim5; dims_update_halo_kernel1_fr1_h[5][1] = ydim5; dims_update_halo_kernel1_fr1_h[6][0] = xdim6; dims_update_halo_kernel1_fr1_h[6][1] = ydim6; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_fr1, dims_update_halo_kernel1_fr1_h, sizeof(dims_update_halo_kernel1_fr1))); } int *arg7h = (int *)arg7.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * 
args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[23].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel1_fr1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[23].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[23].mpi_time += t2-t1; OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[23].transfer += 
ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 23; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 23; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_fr1_execute; if (OPS_diags > 1) { ops_timing_realloc(23,"update_halo_kernel1_fr1"); } ops_enqueue_kernel(desc); } #endif
65766b67b26e9332d2d525594dcb98bbd451c10e.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_fr1 [8][2]; static int dims_update_halo_kernel1_fr1_h [8][2] = {0}; //user function __device__ inline void update_halo_kernel1_fr1_gpu(ACC<double> &density0, ACC<double> &density1, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &pressure, ACC<double> &viscosity, ACC<double> &soundspeed, const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,-1); if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,-1); if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,-1); if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,-1); if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,-1); if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,-1); if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,-1); } __global__ void ops_update_halo_kernel1_fr1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[0][0] * dims_update_halo_kernel1_fr1[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[1][0] * dims_update_halo_kernel1_fr1[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[2][0] * dims_update_halo_kernel1_fr1[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[3][0] * dims_update_halo_kernel1_fr1[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * 
dims_update_halo_kernel1_fr1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[4][0] * dims_update_halo_kernel1_fr1[4][1]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[5][0] * dims_update_halo_kernel1_fr1[5][1]; arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_fr1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_fr1[6][0] * dims_update_halo_kernel1_fr1[6][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel1_fr1[0][0], dims_update_halo_kernel1_fr1[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel1_fr1[1][0], dims_update_halo_kernel1_fr1[1][1], arg1); ACC<double> argp2(dims_update_halo_kernel1_fr1[2][0], dims_update_halo_kernel1_fr1[2][1], arg2); ACC<double> argp3(dims_update_halo_kernel1_fr1[3][0], dims_update_halo_kernel1_fr1[3][1], arg3); ACC<double> argp4(dims_update_halo_kernel1_fr1[4][0], dims_update_halo_kernel1_fr1[4][1], arg4); ACC<double> argp5(dims_update_halo_kernel1_fr1[5][0], dims_update_halo_kernel1_fr1[5][1], arg5); ACC<double> argp6(dims_update_halo_kernel1_fr1[6][0], dims_update_halo_kernel1_fr1[6][1], arg6); update_halo_kernel1_fr1_gpu(argp0, argp1, argp2, argp3, argp4, argp5, argp6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_fr1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { 
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,23)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(23,"update_halo_kernel1_fr1"); OPS_kernels[23].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != dims_update_halo_kernel1_fr1_h[0][0] || ydim0 != dims_update_halo_kernel1_fr1_h[0][1] || xdim1 != dims_update_halo_kernel1_fr1_h[1][0] || ydim1 != dims_update_halo_kernel1_fr1_h[1][1] || xdim2 != dims_update_halo_kernel1_fr1_h[2][0] || ydim2 != dims_update_halo_kernel1_fr1_h[2][1] || xdim3 != dims_update_halo_kernel1_fr1_h[3][0] || ydim3 != dims_update_halo_kernel1_fr1_h[3][1] || xdim4 != dims_update_halo_kernel1_fr1_h[4][0] || ydim4 != dims_update_halo_kernel1_fr1_h[4][1] || xdim5 != dims_update_halo_kernel1_fr1_h[5][0] || ydim5 != dims_update_halo_kernel1_fr1_h[5][1] || xdim6 != dims_update_halo_kernel1_fr1_h[6][0] || ydim6 != dims_update_halo_kernel1_fr1_h[6][1]) { dims_update_halo_kernel1_fr1_h[0][0] = xdim0; dims_update_halo_kernel1_fr1_h[0][1] = ydim0; dims_update_halo_kernel1_fr1_h[1][0] = xdim1; 
dims_update_halo_kernel1_fr1_h[1][1] = ydim1; dims_update_halo_kernel1_fr1_h[2][0] = xdim2; dims_update_halo_kernel1_fr1_h[2][1] = ydim2; dims_update_halo_kernel1_fr1_h[3][0] = xdim3; dims_update_halo_kernel1_fr1_h[3][1] = ydim3; dims_update_halo_kernel1_fr1_h[4][0] = xdim4; dims_update_halo_kernel1_fr1_h[4][1] = ydim4; dims_update_halo_kernel1_fr1_h[5][0] = xdim5; dims_update_halo_kernel1_fr1_h[5][1] = ydim5; dims_update_halo_kernel1_fr1_h[6][0] = xdim6; dims_update_halo_kernel1_fr1_h[6][1] = ydim6; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_fr1, dims_update_halo_kernel1_fr1_h, sizeof(dims_update_halo_kernel1_fr1))); } int *arg7h = (int *)arg7.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * 
args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[23].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel1_fr1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[23].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[23].mpi_time += t2-t1; OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[23].transfer += ops_compute_transfer(dim, start, 
end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 23; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 23; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_fr1_execute; if (OPS_diags > 1) { ops_timing_realloc(23,"update_halo_kernel1_fr1"); } ops_enqueue_kernel(desc); } #endif
d2151566dcb9b9633f39b546d88da596b10ce6ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <errno.h> #include <time.h> #include <stdbool.h> /* reverseArray @params @return void */ __global__ void reverseArray(int *d_out, int *d_in) { extern __shared__ int s_data[]; int inOffset = blockDim.x * blockIdx.x; int in = inOffset + threadIdx.x; // Load one element per thread from device memory and store it // in reversed order into temporary shared memory s_data[blockDim.x - 1 - threadIdx.x] = d_in[in]; // Block until all threads in the block have written their data to shared mem __syncthreads(); // write the data from shared memory in forward order, // but to the reversed block offset as before int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int out = outOffset + threadIdx.x; d_out[out] = s_data[threadIdx.x]; } /* main program */ int main(int argc, char** argv) { // pointer for host memory and size int *h_a; int dimA = 16*1024*1024; // 16 MB // array to compare results int *check; // pointer for device memory int *d_b, *d_a; // define grid and block size int numThreadsPerBlock = 8; // Compute number of blocks needed based on array size and desired block size int numBlocks = dimA / numThreadsPerBlock; // Part 1 of 2: Compute the number of bytes of shared memory needed // This is used in the kernel invocation below int sharedMemSize = numThreadsPerBlock * sizeof(int); // allocate host and device memory size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int); h_a = (int *) malloc(memSize); check = (int *) malloc(memSize); hipMalloc((void **) &d_a, memSize); hipMalloc((void **) &d_b, memSize); // Initialize input array on host int val; srand(time(0)); for (int i = 0; i < dimA; ++i) { val = rand(); h_a[i] = val; check[i] = val; } // Copy host array to device array hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice ); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); 
hipLaunchKernelGGL(( reverseArray), dim3(dimGrid), dim3(dimBlock), sharedMemSize , 0, d_b, d_a); // block until the device has completed hipDeviceSynchronize(); // device to host copy hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost ); printf("Verifying program correctness.... "); // verify the data returned to the host is correct for (int i = 0; i < dimA; i++) { assert(h_a[i] == check[dimA - 1 - i]); } printf("Everthing checks out!\n"); // free device memory hipFree(d_a); hipFree(d_b); // free host memory free(h_a); free(check); return 0; } //qsub hw10.sh -q UI-GPU -I ngpus=1
d2151566dcb9b9633f39b546d88da596b10ce6ad.cu
#include <stdio.h> #include <assert.h> #include <stdlib.h> #include <errno.h> #include <time.h> #include <stdbool.h> /* reverseArray @params @return void */ __global__ void reverseArray(int *d_out, int *d_in) { extern __shared__ int s_data[]; int inOffset = blockDim.x * blockIdx.x; int in = inOffset + threadIdx.x; // Load one element per thread from device memory and store it // in reversed order into temporary shared memory s_data[blockDim.x - 1 - threadIdx.x] = d_in[in]; // Block until all threads in the block have written their data to shared mem __syncthreads(); // write the data from shared memory in forward order, // but to the reversed block offset as before int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x); int out = outOffset + threadIdx.x; d_out[out] = s_data[threadIdx.x]; } /* main program */ int main(int argc, char** argv) { // pointer for host memory and size int *h_a; int dimA = 16*1024*1024; // 16 MB // array to compare results int *check; // pointer for device memory int *d_b, *d_a; // define grid and block size int numThreadsPerBlock = 8; // Compute number of blocks needed based on array size and desired block size int numBlocks = dimA / numThreadsPerBlock; // Part 1 of 2: Compute the number of bytes of shared memory needed // This is used in the kernel invocation below int sharedMemSize = numThreadsPerBlock * sizeof(int); // allocate host and device memory size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int); h_a = (int *) malloc(memSize); check = (int *) malloc(memSize); cudaMalloc((void **) &d_a, memSize); cudaMalloc((void **) &d_b, memSize); // Initialize input array on host int val; srand(time(0)); for (int i = 0; i < dimA; ++i) { val = rand(); h_a[i] = val; check[i] = val; } // Copy host array to device array cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice ); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); reverseArray<<< dimGrid, dimBlock, sharedMemSize >>>(d_b, d_a); // block until the 
device has completed cudaThreadSynchronize(); // device to host copy cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost ); printf("Verifying program correctness.... "); // verify the data returned to the host is correct for (int i = 0; i < dimA; i++) { assert(h_a[i] == check[dimA - 1 - i]); } printf("Everthing checks out!\n"); // free device memory cudaFree(d_a); cudaFree(d_b); // free host memory free(h_a); free(check); return 0; } //qsub hw10.sh -q UI-GPU -I ngpus=1
aed3f78d06fdc7729d8c56546214bd9acf3086aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2018 XGBoost contributors */ #include <xgboost/logging.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <utility> #include <vector> #include <memory> #include <mutex> #include "hist_util.h" #include "xgboost/host_device_vector.h" #include "device_helpers_hip.cuh" #include "quantile.h" #include "../tree/param.h" namespace xgboost { namespace common { using WXQSketch = DenseCuts::WXQSketch; __global__ void FindCutsK(WXQSketch::Entry* __restrict__ cuts, const bst_float* __restrict__ data, const float* __restrict__ cum_weights, int nsamples, int ncuts) { // ncuts < nsamples int icut = threadIdx.x + blockIdx.x * blockDim.x; if (icut >= ncuts) { return; } int isample = 0; if (icut == 0) { isample = 0; } else if (icut == ncuts - 1) { isample = nsamples - 1; } else { bst_float rank = cum_weights[nsamples - 1] / static_cast<float>(ncuts - 1) * static_cast<float>(icut); // -1 is used because cum_weights is an inclusive sum isample = dh::UpperBound(cum_weights, nsamples, rank); isample = max(0, min(isample, nsamples - 1)); } // repeated values will be filtered out on the CPU bst_float rmin = isample > 0 ? 
cum_weights[isample - 1] : 0; bst_float rmax = cum_weights[isample]; cuts[icut] = WXQSketch::Entry(rmin, rmax, rmax - rmin, data[isample]); } // predictate for thrust filtering that returns true if the element is not a NaN struct IsNotNaN { __device__ bool operator()(float a) const { return !isnan(a); } }; __global__ void UnpackFeaturesK(float* __restrict__ fvalues, float* __restrict__ feature_weights, const size_t* __restrict__ row_ptrs, const float* __restrict__ weights, Entry* entries, size_t nrows_array, size_t row_begin_ptr, size_t nrows) { size_t irow = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (irow >= nrows) { return; } size_t row_length = row_ptrs[irow + 1] - row_ptrs[irow]; int icol = threadIdx.y + blockIdx.y * blockDim.y; if (icol >= row_length) { return; } Entry entry = entries[row_ptrs[irow] - row_begin_ptr + icol]; size_t ind = entry.index * nrows_array + irow; // if weights are present, ensure that a non-NaN value is written to weights // if and only if it is also written to features if (!isnan(entry.fvalue) && (weights == nullptr || !isnan(weights[irow]))) { fvalues[ind] = entry.fvalue; if (feature_weights != nullptr && weights != nullptr) { feature_weights[ind] = weights[irow]; } } } /*! * \brief A container that holds the device sketches across all * sparse page batches which are distributed to different devices. * As sketches are aggregated by column, the mutex guards * multiple devices pushing sketch summary for the same column * across distinct rows. 
*/ struct SketchContainer { std::vector<DenseCuts::WXQSketch> sketches_; // NOLINT std::vector<std::mutex> col_locks_; // NOLINT static constexpr int kOmpNumColsParallelizeLimit = 1000; SketchContainer(int max_bin, DMatrix* dmat) : col_locks_(dmat->Info().num_col_) { const MetaInfo& info = dmat->Info(); // Initialize Sketches for this dmatrix sketches_.resize(info.num_col_); #pragma omp parallel for default(none) shared(info, max_bin) schedule(static) \ if (info.num_col_ > kOmpNumColsParallelizeLimit) // NOLINT for (int icol = 0; icol < info.num_col_; ++icol) { // NOLINT sketches_[icol].Init(info.num_row_, 1.0 / (8 * max_bin)); } } // Prevent copying/assigning/moving this as its internals can't be assigned/copied/moved SketchContainer(const SketchContainer &) = delete; SketchContainer(const SketchContainer &&) = delete; SketchContainer &operator=(const SketchContainer &) = delete; SketchContainer &operator=(const SketchContainer &&) = delete; }; // finds quantiles on the GPU class GPUSketcher { public: GPUSketcher(int device, int max_bin, int gpu_nrows) : device_(device), max_bin_(max_bin), gpu_batch_nrows_(gpu_nrows), row_stride_(0) {} ~GPUSketcher() { // NOLINT dh::safe_cuda(hipSetDevice(device_)); } void SketchBatch(const SparsePage &batch, const MetaInfo &info) { n_rows_ = batch.Size(); Init(batch, info, gpu_batch_nrows_); Sketch(batch, info); ComputeRowStride(); } /* Builds the sketches on the GPU for the dmatrix and returns the row stride * for the entire dataset */ size_t Sketch(DMatrix *dmat, DenseCuts *hmat) { const MetaInfo& info = dmat->Info(); row_stride_ = 0; sketch_container_.reset(new SketchContainer(max_bin_, dmat)); for (const auto& batch : dmat->GetBatches<SparsePage>()) { this->SketchBatch(batch, info); } hmat->Init(&sketch_container_->sketches_, max_bin_); return row_stride_; } // This needs to be public because of the __device__ lambda. 
void ComputeRowStride() { // Find the row stride for this batch auto row_iter = row_ptrs_.begin(); // Functor for finding the maximum row size for this batch auto get_size = [=] __device__(size_t row) { return row_iter[row + 1] - row_iter[row]; }; // NOLINT auto counting = thrust::make_counting_iterator(size_t(0)); using TransformT = thrust::transform_iterator<decltype(get_size), decltype(counting), size_t>; TransformT row_size_iter = TransformT(counting, get_size); size_t batch_row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows_, 0, thrust::maximum<size_t>()); row_stride_ = ::max(row_stride_, batch_row_stride); } // This needs to be public because of the __device__ lambda. void FindColumnCuts(size_t batch_nrows, size_t icol) { size_t tmp_size = tmp_storage_.size(); // filter out NaNs in feature values auto fvalues_begin = fvalues_.data() + icol * gpu_batch_nrows_; hipcub::DeviceSelect::If(tmp_storage_.data().get(), tmp_size, fvalues_begin, fvalues_cur_.data(), num_elements_.begin(), batch_nrows, IsNotNaN()); size_t nfvalues_cur = 0; thrust::copy_n(num_elements_.begin(), 1, &nfvalues_cur); // compute cumulative weights using a prefix scan if (has_weights_) { // filter out NaNs in weights; // since hipcub::DeviceSelect::If performs stable filtering, // the weights are stored in the correct positions auto feature_weights_begin = feature_weights_.data() + icol * gpu_batch_nrows_; hipcub::DeviceSelect::If(tmp_storage_.data().get(), tmp_size, feature_weights_begin, weights_.data().get(), num_elements_.begin(), batch_nrows, IsNotNaN()); // sort the values and weights hipcub::DeviceRadixSort::SortPairs(tmp_storage_.data().get(), tmp_size, fvalues_cur_.data().get(), fvalues_begin.get(), weights_.data().get(), weights2_.data().get(), nfvalues_cur); // sum the weights to get cumulative weight values hipcub::DeviceScan::InclusiveSum(tmp_storage_.data().get(), tmp_size, weights2_.begin(), weights_.begin(), nfvalues_cur); } else { // sort the batch values 
hipcub::DeviceRadixSort::SortKeys(tmp_storage_.data().get(), tmp_size, fvalues_cur_.data().get(), fvalues_begin.get(), nfvalues_cur); // fill in cumulative weights with counting iterator thrust::copy_n(thrust::make_counting_iterator(1), nfvalues_cur, weights_.begin()); } // remove repeated items and sum the weights across them; // non-negative weights are assumed hipcub::DeviceReduce::ReduceByKey(tmp_storage_.data().get(), tmp_size, fvalues_begin, fvalues_cur_.begin(), weights_.begin(), weights2_.begin(), num_elements_.begin(), thrust::maximum<bst_float>(), nfvalues_cur); size_t n_unique = 0; thrust::copy_n(num_elements_.begin(), 1, &n_unique); // extract cuts n_cuts_cur_[icol] = ::min(n_cuts_, n_unique); // if less elements than cuts: copy all elements with their weights if (n_cuts_ > n_unique) { float* weights2_ptr = weights2_.data().get(); float* fvalues_ptr = fvalues_cur_.data().get(); WXQSketch::Entry* cuts_ptr = cuts_d_.data().get() + icol * n_cuts_; dh::LaunchN(device_, n_unique, [=]__device__(size_t i) { bst_float rmax = weights2_ptr[i]; bst_float rmin = i > 0 ? 
weights2_ptr[i - 1] : 0; cuts_ptr[i] = WXQSketch::Entry(rmin, rmax, rmax - rmin, fvalues_ptr[i]); }); } else if (n_cuts_cur_[icol] > 0) { // if more elements than cuts: use binary search on cumulative weights uint32_t constexpr kBlockThreads = 256; uint32_t const kGrids = common::DivRoundUp(n_cuts_cur_[icol], kBlockThreads); dh::LaunchKernel {kGrids, kBlockThreads} ( FindCutsK, cuts_d_.data().get() + icol * n_cuts_, fvalues_cur_.data().get(), weights2_.data().get(), n_unique, n_cuts_cur_[icol]); dh::safe_cuda(hipGetLastError()); // NOLINT } } private: void Init(const SparsePage& row_batch, const MetaInfo& info, int gpu_batch_nrows) { num_cols_ = info.num_col_; has_weights_ = info.weights_.Size() > 0; // find the batch size if (gpu_batch_nrows == 0) { // By default, use no more than 1/16th of GPU memory gpu_batch_nrows_ = dh::TotalMemory(device_) / (16 * num_cols_ * sizeof(Entry)); } else if (gpu_batch_nrows == -1) { gpu_batch_nrows_ = n_rows_; } else { gpu_batch_nrows_ = gpu_batch_nrows; } if (gpu_batch_nrows_ > n_rows_) { gpu_batch_nrows_ = n_rows_; } constexpr int kFactor = 8; double eps = 1.0 / (kFactor * max_bin_); size_t dummy_nlevel; WXQSketch::LimitSizeLevel(gpu_batch_nrows_, eps, &dummy_nlevel, &n_cuts_); // allocate necessary GPU buffers dh::safe_cuda(hipSetDevice(device_)); entries_.resize(gpu_batch_nrows_ * num_cols_); fvalues_.resize(gpu_batch_nrows_ * num_cols_); fvalues_cur_.resize(gpu_batch_nrows_); cuts_d_.resize(n_cuts_ * num_cols_); cuts_h_.resize(n_cuts_ * num_cols_); weights_.resize(gpu_batch_nrows_); weights2_.resize(gpu_batch_nrows_); num_elements_.resize(1); if (has_weights_) { feature_weights_.resize(gpu_batch_nrows_ * num_cols_); } n_cuts_cur_.resize(num_cols_); // allocate storage for CUB algorithms; the size is the maximum of the sizes // required for various algorithm size_t tmp_size = 0, cur_tmp_size = 0; // size for sorting if (has_weights_) { hipcub::DeviceRadixSort::SortPairs(nullptr, cur_tmp_size, fvalues_cur_.data().get(), 
fvalues_.data().get(), weights_.data().get(), weights2_.data().get(), gpu_batch_nrows_); } else { hipcub::DeviceRadixSort::SortKeys(nullptr, cur_tmp_size, fvalues_cur_.data().get(), fvalues_.data().get(), gpu_batch_nrows_); } tmp_size = ::max(tmp_size, cur_tmp_size); // size for inclusive scan if (has_weights_) { hipcub::DeviceScan::InclusiveSum(nullptr, cur_tmp_size, weights2_.begin(), weights_.begin(), gpu_batch_nrows_); tmp_size = ::max(tmp_size, cur_tmp_size); } // size for reduction by key hipcub::DeviceReduce::ReduceByKey(nullptr, cur_tmp_size, fvalues_.begin(), fvalues_cur_.begin(), weights_.begin(), weights2_.begin(), num_elements_.begin(), thrust::maximum<bst_float>(), gpu_batch_nrows_); tmp_size = ::max(tmp_size, cur_tmp_size); // size for filtering hipcub::DeviceSelect::If(nullptr, cur_tmp_size, fvalues_.begin(), fvalues_cur_.begin(), num_elements_.begin(), gpu_batch_nrows_, IsNotNaN()); tmp_size = ::max(tmp_size, cur_tmp_size); tmp_storage_.resize(tmp_size); } void Sketch(const SparsePage& row_batch, const MetaInfo& info) { // copy rows to the device dh::safe_cuda(hipSetDevice(device_)); const auto& offset_vec = row_batch.offset.HostVector(); row_ptrs_.resize(n_rows_ + 1); thrust::copy(offset_vec.data(), offset_vec.data() + n_rows_ + 1, row_ptrs_.begin()); size_t gpu_nbatches = common::DivRoundUp(n_rows_, gpu_batch_nrows_); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { SketchBatch(row_batch, info, gpu_batch); } } void SketchBatch(const SparsePage& row_batch, const MetaInfo& info, size_t gpu_batch) { // compute start and end indices size_t batch_row_begin = gpu_batch * gpu_batch_nrows_; size_t batch_row_end = ::min((gpu_batch + 1) * gpu_batch_nrows_, static_cast<size_t>(n_rows_)); size_t batch_nrows = batch_row_end - batch_row_begin; const auto& offset_vec = row_batch.offset.HostVector(); const auto& data_vec = row_batch.data.HostVector(); size_t n_entries = offset_vec[batch_row_end] - offset_vec[batch_row_begin]; // copy the batch 
to the GPU dh::safe_cuda(hipMemcpyAsync(entries_.data().get(), data_vec.data() + offset_vec[batch_row_begin], n_entries * sizeof(Entry), hipMemcpyDefault)); // copy the weights if necessary if (has_weights_) { const auto& weights_vec = info.weights_.HostVector(); dh::safe_cuda(hipMemcpyAsync(weights_.data().get(), weights_vec.data() + batch_row_begin, batch_nrows * sizeof(bst_float), hipMemcpyDefault)); } // unpack the features; also unpack weights if present thrust::fill(fvalues_.begin(), fvalues_.end(), NAN); if (has_weights_) { thrust::fill(feature_weights_.begin(), feature_weights_.end(), NAN); } dim3 block3(16, 64, 1); // NOTE: This will typically support ~ 4M features - 64K*64 dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), common::DivRoundUp(num_cols_, block3.y), 1); dh::LaunchKernel {grid3, block3} ( UnpackFeaturesK, fvalues_.data().get(), has_weights_ ? feature_weights_.data().get() : nullptr, row_ptrs_.data().get() + batch_row_begin, has_weights_ ? weights_.data().get() : nullptr, entries_.data().get(), gpu_batch_nrows_, offset_vec[batch_row_begin], batch_nrows); for (int icol = 0; icol < num_cols_; ++icol) { FindColumnCuts(batch_nrows, icol); } // add cuts into sketches thrust::copy(cuts_d_.begin(), cuts_d_.end(), cuts_h_.begin()); #pragma omp parallel for default(none) schedule(static) \ if (num_cols_ > SketchContainer::kOmpNumColsParallelizeLimit) // NOLINT for (int icol = 0; icol < num_cols_; ++icol) { WXQSketch::SummaryContainer summary; summary.Reserve(n_cuts_); summary.MakeFromSorted(&cuts_h_[n_cuts_ * icol], n_cuts_cur_[icol]); std::lock_guard<std::mutex> lock(sketch_container_->col_locks_[icol]); sketch_container_->sketches_[icol].PushSummary(summary); } } const int device_; const int max_bin_; int gpu_batch_nrows_; size_t row_stride_; std::unique_ptr<SketchContainer> sketch_container_; bst_uint n_rows_{}; int num_cols_{0}; size_t n_cuts_{0}; bool has_weights_{false}; dh::device_vector<size_t> row_ptrs_{}; dh::device_vector<Entry> 
entries_{}; dh::device_vector<bst_float> fvalues_{}; dh::device_vector<bst_float> feature_weights_{}; dh::device_vector<bst_float> fvalues_cur_{}; dh::device_vector<WXQSketch::Entry> cuts_d_{}; thrust::host_vector<WXQSketch::Entry> cuts_h_{}; dh::device_vector<bst_float> weights_{}; dh::device_vector<bst_float> weights2_{}; std::vector<size_t> n_cuts_cur_{}; dh::device_vector<size_t> num_elements_{}; dh::device_vector<char> tmp_storage_{}; }; size_t DeviceSketch(int device, int max_bin, int gpu_batch_nrows, DMatrix* dmat, HistogramCuts* hmat) { GPUSketcher sketcher(device, max_bin, gpu_batch_nrows); // We only need to return the result in HistogramCuts container, so it is safe to // use a pointer of local HistogramCutsDense DenseCuts dense_cuts(hmat); auto res = sketcher.Sketch(dmat, &dense_cuts); return res; } } // namespace common } // namespace xgboost
aed3f78d06fdc7729d8c56546214bd9acf3086aa.cu
/*! * Copyright 2018 XGBoost contributors */ #include <xgboost/logging.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <utility> #include <vector> #include <memory> #include <mutex> #include "hist_util.h" #include "xgboost/host_device_vector.h" #include "device_helpers.cuh" #include "quantile.h" #include "../tree/param.h" namespace xgboost { namespace common { using WXQSketch = DenseCuts::WXQSketch; __global__ void FindCutsK(WXQSketch::Entry* __restrict__ cuts, const bst_float* __restrict__ data, const float* __restrict__ cum_weights, int nsamples, int ncuts) { // ncuts < nsamples int icut = threadIdx.x + blockIdx.x * blockDim.x; if (icut >= ncuts) { return; } int isample = 0; if (icut == 0) { isample = 0; } else if (icut == ncuts - 1) { isample = nsamples - 1; } else { bst_float rank = cum_weights[nsamples - 1] / static_cast<float>(ncuts - 1) * static_cast<float>(icut); // -1 is used because cum_weights is an inclusive sum isample = dh::UpperBound(cum_weights, nsamples, rank); isample = max(0, min(isample, nsamples - 1)); } // repeated values will be filtered out on the CPU bst_float rmin = isample > 0 ? 
cum_weights[isample - 1] : 0; bst_float rmax = cum_weights[isample]; cuts[icut] = WXQSketch::Entry(rmin, rmax, rmax - rmin, data[isample]); } // predictate for thrust filtering that returns true if the element is not a NaN struct IsNotNaN { __device__ bool operator()(float a) const { return !isnan(a); } }; __global__ void UnpackFeaturesK(float* __restrict__ fvalues, float* __restrict__ feature_weights, const size_t* __restrict__ row_ptrs, const float* __restrict__ weights, Entry* entries, size_t nrows_array, size_t row_begin_ptr, size_t nrows) { size_t irow = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (irow >= nrows) { return; } size_t row_length = row_ptrs[irow + 1] - row_ptrs[irow]; int icol = threadIdx.y + blockIdx.y * blockDim.y; if (icol >= row_length) { return; } Entry entry = entries[row_ptrs[irow] - row_begin_ptr + icol]; size_t ind = entry.index * nrows_array + irow; // if weights are present, ensure that a non-NaN value is written to weights // if and only if it is also written to features if (!isnan(entry.fvalue) && (weights == nullptr || !isnan(weights[irow]))) { fvalues[ind] = entry.fvalue; if (feature_weights != nullptr && weights != nullptr) { feature_weights[ind] = weights[irow]; } } } /*! * \brief A container that holds the device sketches across all * sparse page batches which are distributed to different devices. * As sketches are aggregated by column, the mutex guards * multiple devices pushing sketch summary for the same column * across distinct rows. 
*/ struct SketchContainer { std::vector<DenseCuts::WXQSketch> sketches_; // NOLINT std::vector<std::mutex> col_locks_; // NOLINT static constexpr int kOmpNumColsParallelizeLimit = 1000; SketchContainer(int max_bin, DMatrix* dmat) : col_locks_(dmat->Info().num_col_) { const MetaInfo& info = dmat->Info(); // Initialize Sketches for this dmatrix sketches_.resize(info.num_col_); #pragma omp parallel for default(none) shared(info, max_bin) schedule(static) \ if (info.num_col_ > kOmpNumColsParallelizeLimit) // NOLINT for (int icol = 0; icol < info.num_col_; ++icol) { // NOLINT sketches_[icol].Init(info.num_row_, 1.0 / (8 * max_bin)); } } // Prevent copying/assigning/moving this as its internals can't be assigned/copied/moved SketchContainer(const SketchContainer &) = delete; SketchContainer(const SketchContainer &&) = delete; SketchContainer &operator=(const SketchContainer &) = delete; SketchContainer &operator=(const SketchContainer &&) = delete; }; // finds quantiles on the GPU class GPUSketcher { public: GPUSketcher(int device, int max_bin, int gpu_nrows) : device_(device), max_bin_(max_bin), gpu_batch_nrows_(gpu_nrows), row_stride_(0) {} ~GPUSketcher() { // NOLINT dh::safe_cuda(cudaSetDevice(device_)); } void SketchBatch(const SparsePage &batch, const MetaInfo &info) { n_rows_ = batch.Size(); Init(batch, info, gpu_batch_nrows_); Sketch(batch, info); ComputeRowStride(); } /* Builds the sketches on the GPU for the dmatrix and returns the row stride * for the entire dataset */ size_t Sketch(DMatrix *dmat, DenseCuts *hmat) { const MetaInfo& info = dmat->Info(); row_stride_ = 0; sketch_container_.reset(new SketchContainer(max_bin_, dmat)); for (const auto& batch : dmat->GetBatches<SparsePage>()) { this->SketchBatch(batch, info); } hmat->Init(&sketch_container_->sketches_, max_bin_); return row_stride_; } // This needs to be public because of the __device__ lambda. 
void ComputeRowStride() { // Find the row stride for this batch auto row_iter = row_ptrs_.begin(); // Functor for finding the maximum row size for this batch auto get_size = [=] __device__(size_t row) { return row_iter[row + 1] - row_iter[row]; }; // NOLINT auto counting = thrust::make_counting_iterator(size_t(0)); using TransformT = thrust::transform_iterator<decltype(get_size), decltype(counting), size_t>; TransformT row_size_iter = TransformT(counting, get_size); size_t batch_row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows_, 0, thrust::maximum<size_t>()); row_stride_ = std::max(row_stride_, batch_row_stride); } // This needs to be public because of the __device__ lambda. void FindColumnCuts(size_t batch_nrows, size_t icol) { size_t tmp_size = tmp_storage_.size(); // filter out NaNs in feature values auto fvalues_begin = fvalues_.data() + icol * gpu_batch_nrows_; cub::DeviceSelect::If(tmp_storage_.data().get(), tmp_size, fvalues_begin, fvalues_cur_.data(), num_elements_.begin(), batch_nrows, IsNotNaN()); size_t nfvalues_cur = 0; thrust::copy_n(num_elements_.begin(), 1, &nfvalues_cur); // compute cumulative weights using a prefix scan if (has_weights_) { // filter out NaNs in weights; // since cub::DeviceSelect::If performs stable filtering, // the weights are stored in the correct positions auto feature_weights_begin = feature_weights_.data() + icol * gpu_batch_nrows_; cub::DeviceSelect::If(tmp_storage_.data().get(), tmp_size, feature_weights_begin, weights_.data().get(), num_elements_.begin(), batch_nrows, IsNotNaN()); // sort the values and weights cub::DeviceRadixSort::SortPairs(tmp_storage_.data().get(), tmp_size, fvalues_cur_.data().get(), fvalues_begin.get(), weights_.data().get(), weights2_.data().get(), nfvalues_cur); // sum the weights to get cumulative weight values cub::DeviceScan::InclusiveSum(tmp_storage_.data().get(), tmp_size, weights2_.begin(), weights_.begin(), nfvalues_cur); } else { // sort the batch values 
cub::DeviceRadixSort::SortKeys(tmp_storage_.data().get(), tmp_size, fvalues_cur_.data().get(), fvalues_begin.get(), nfvalues_cur); // fill in cumulative weights with counting iterator thrust::copy_n(thrust::make_counting_iterator(1), nfvalues_cur, weights_.begin()); } // remove repeated items and sum the weights across them; // non-negative weights are assumed cub::DeviceReduce::ReduceByKey(tmp_storage_.data().get(), tmp_size, fvalues_begin, fvalues_cur_.begin(), weights_.begin(), weights2_.begin(), num_elements_.begin(), thrust::maximum<bst_float>(), nfvalues_cur); size_t n_unique = 0; thrust::copy_n(num_elements_.begin(), 1, &n_unique); // extract cuts n_cuts_cur_[icol] = std::min(n_cuts_, n_unique); // if less elements than cuts: copy all elements with their weights if (n_cuts_ > n_unique) { float* weights2_ptr = weights2_.data().get(); float* fvalues_ptr = fvalues_cur_.data().get(); WXQSketch::Entry* cuts_ptr = cuts_d_.data().get() + icol * n_cuts_; dh::LaunchN(device_, n_unique, [=]__device__(size_t i) { bst_float rmax = weights2_ptr[i]; bst_float rmin = i > 0 ? 
weights2_ptr[i - 1] : 0; cuts_ptr[i] = WXQSketch::Entry(rmin, rmax, rmax - rmin, fvalues_ptr[i]); }); } else if (n_cuts_cur_[icol] > 0) { // if more elements than cuts: use binary search on cumulative weights uint32_t constexpr kBlockThreads = 256; uint32_t const kGrids = common::DivRoundUp(n_cuts_cur_[icol], kBlockThreads); dh::LaunchKernel {kGrids, kBlockThreads} ( FindCutsK, cuts_d_.data().get() + icol * n_cuts_, fvalues_cur_.data().get(), weights2_.data().get(), n_unique, n_cuts_cur_[icol]); dh::safe_cuda(cudaGetLastError()); // NOLINT } } private: void Init(const SparsePage& row_batch, const MetaInfo& info, int gpu_batch_nrows) { num_cols_ = info.num_col_; has_weights_ = info.weights_.Size() > 0; // find the batch size if (gpu_batch_nrows == 0) { // By default, use no more than 1/16th of GPU memory gpu_batch_nrows_ = dh::TotalMemory(device_) / (16 * num_cols_ * sizeof(Entry)); } else if (gpu_batch_nrows == -1) { gpu_batch_nrows_ = n_rows_; } else { gpu_batch_nrows_ = gpu_batch_nrows; } if (gpu_batch_nrows_ > n_rows_) { gpu_batch_nrows_ = n_rows_; } constexpr int kFactor = 8; double eps = 1.0 / (kFactor * max_bin_); size_t dummy_nlevel; WXQSketch::LimitSizeLevel(gpu_batch_nrows_, eps, &dummy_nlevel, &n_cuts_); // allocate necessary GPU buffers dh::safe_cuda(cudaSetDevice(device_)); entries_.resize(gpu_batch_nrows_ * num_cols_); fvalues_.resize(gpu_batch_nrows_ * num_cols_); fvalues_cur_.resize(gpu_batch_nrows_); cuts_d_.resize(n_cuts_ * num_cols_); cuts_h_.resize(n_cuts_ * num_cols_); weights_.resize(gpu_batch_nrows_); weights2_.resize(gpu_batch_nrows_); num_elements_.resize(1); if (has_weights_) { feature_weights_.resize(gpu_batch_nrows_ * num_cols_); } n_cuts_cur_.resize(num_cols_); // allocate storage for CUB algorithms; the size is the maximum of the sizes // required for various algorithm size_t tmp_size = 0, cur_tmp_size = 0; // size for sorting if (has_weights_) { cub::DeviceRadixSort::SortPairs(nullptr, cur_tmp_size, fvalues_cur_.data().get(), 
fvalues_.data().get(), weights_.data().get(), weights2_.data().get(), gpu_batch_nrows_); } else { cub::DeviceRadixSort::SortKeys(nullptr, cur_tmp_size, fvalues_cur_.data().get(), fvalues_.data().get(), gpu_batch_nrows_); } tmp_size = std::max(tmp_size, cur_tmp_size); // size for inclusive scan if (has_weights_) { cub::DeviceScan::InclusiveSum(nullptr, cur_tmp_size, weights2_.begin(), weights_.begin(), gpu_batch_nrows_); tmp_size = std::max(tmp_size, cur_tmp_size); } // size for reduction by key cub::DeviceReduce::ReduceByKey(nullptr, cur_tmp_size, fvalues_.begin(), fvalues_cur_.begin(), weights_.begin(), weights2_.begin(), num_elements_.begin(), thrust::maximum<bst_float>(), gpu_batch_nrows_); tmp_size = std::max(tmp_size, cur_tmp_size); // size for filtering cub::DeviceSelect::If(nullptr, cur_tmp_size, fvalues_.begin(), fvalues_cur_.begin(), num_elements_.begin(), gpu_batch_nrows_, IsNotNaN()); tmp_size = std::max(tmp_size, cur_tmp_size); tmp_storage_.resize(tmp_size); } void Sketch(const SparsePage& row_batch, const MetaInfo& info) { // copy rows to the device dh::safe_cuda(cudaSetDevice(device_)); const auto& offset_vec = row_batch.offset.HostVector(); row_ptrs_.resize(n_rows_ + 1); thrust::copy(offset_vec.data(), offset_vec.data() + n_rows_ + 1, row_ptrs_.begin()); size_t gpu_nbatches = common::DivRoundUp(n_rows_, gpu_batch_nrows_); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { SketchBatch(row_batch, info, gpu_batch); } } void SketchBatch(const SparsePage& row_batch, const MetaInfo& info, size_t gpu_batch) { // compute start and end indices size_t batch_row_begin = gpu_batch * gpu_batch_nrows_; size_t batch_row_end = std::min((gpu_batch + 1) * gpu_batch_nrows_, static_cast<size_t>(n_rows_)); size_t batch_nrows = batch_row_end - batch_row_begin; const auto& offset_vec = row_batch.offset.HostVector(); const auto& data_vec = row_batch.data.HostVector(); size_t n_entries = offset_vec[batch_row_end] - offset_vec[batch_row_begin]; // copy the 
batch to the GPU dh::safe_cuda(cudaMemcpyAsync(entries_.data().get(), data_vec.data() + offset_vec[batch_row_begin], n_entries * sizeof(Entry), cudaMemcpyDefault)); // copy the weights if necessary if (has_weights_) { const auto& weights_vec = info.weights_.HostVector(); dh::safe_cuda(cudaMemcpyAsync(weights_.data().get(), weights_vec.data() + batch_row_begin, batch_nrows * sizeof(bst_float), cudaMemcpyDefault)); } // unpack the features; also unpack weights if present thrust::fill(fvalues_.begin(), fvalues_.end(), NAN); if (has_weights_) { thrust::fill(feature_weights_.begin(), feature_weights_.end(), NAN); } dim3 block3(16, 64, 1); // NOTE: This will typically support ~ 4M features - 64K*64 dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), common::DivRoundUp(num_cols_, block3.y), 1); dh::LaunchKernel {grid3, block3} ( UnpackFeaturesK, fvalues_.data().get(), has_weights_ ? feature_weights_.data().get() : nullptr, row_ptrs_.data().get() + batch_row_begin, has_weights_ ? weights_.data().get() : nullptr, entries_.data().get(), gpu_batch_nrows_, offset_vec[batch_row_begin], batch_nrows); for (int icol = 0; icol < num_cols_; ++icol) { FindColumnCuts(batch_nrows, icol); } // add cuts into sketches thrust::copy(cuts_d_.begin(), cuts_d_.end(), cuts_h_.begin()); #pragma omp parallel for default(none) schedule(static) \ if (num_cols_ > SketchContainer::kOmpNumColsParallelizeLimit) // NOLINT for (int icol = 0; icol < num_cols_; ++icol) { WXQSketch::SummaryContainer summary; summary.Reserve(n_cuts_); summary.MakeFromSorted(&cuts_h_[n_cuts_ * icol], n_cuts_cur_[icol]); std::lock_guard<std::mutex> lock(sketch_container_->col_locks_[icol]); sketch_container_->sketches_[icol].PushSummary(summary); } } const int device_; const int max_bin_; int gpu_batch_nrows_; size_t row_stride_; std::unique_ptr<SketchContainer> sketch_container_; bst_uint n_rows_{}; int num_cols_{0}; size_t n_cuts_{0}; bool has_weights_{false}; dh::device_vector<size_t> row_ptrs_{}; dh::device_vector<Entry> 
entries_{}; dh::device_vector<bst_float> fvalues_{}; dh::device_vector<bst_float> feature_weights_{}; dh::device_vector<bst_float> fvalues_cur_{}; dh::device_vector<WXQSketch::Entry> cuts_d_{}; thrust::host_vector<WXQSketch::Entry> cuts_h_{}; dh::device_vector<bst_float> weights_{}; dh::device_vector<bst_float> weights2_{}; std::vector<size_t> n_cuts_cur_{}; dh::device_vector<size_t> num_elements_{}; dh::device_vector<char> tmp_storage_{}; }; size_t DeviceSketch(int device, int max_bin, int gpu_batch_nrows, DMatrix* dmat, HistogramCuts* hmat) { GPUSketcher sketcher(device, max_bin, gpu_batch_nrows); // We only need to return the result in HistogramCuts container, so it is safe to // use a pointer of local HistogramCutsDense DenseCuts dense_cuts(hmat); auto res = sketcher.Sketch(dmat, &dense_cuts); return res; } } // namespace common } // namespace xgboost
b2c08705b95f73cdc49611dd432ebf3fbfbf1001.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "grad_parallel.h" #include <stdlib.h> #include "get_error.h" #definehipLaunchKernelGGL(( GCHAINS) , dim3(dG[c]), dim3(dB[c]) , 0, 0, GradParallelLinked::GradParallelLinkedGrids* grids, int jtwist) : grids_(grids) { nLinks = nullptr; nChains = nullptr; ikxLinked_h = nullptr; ikyLinked_h = nullptr; ikxLinked = nullptr; ikyLinked = nullptr; kzLinked = nullptr; G_linked = nullptr; dG = nullptr; dB = nullptr; zft_plan_forward = nullptr; zft_plan_inverse = nullptr; zft_plan_forward_singlemom = nullptr; // zft_plan_inverse_singlemom = nullptr; dz_plan_forward = nullptr; dz_plan_inverse = nullptr; dz_plan_forward_singlemom = nullptr; dz_plan_inverse_singlemom = nullptr; abs_dz_plan_forward_singlemom = nullptr; int naky = grids_->Naky; int nakx = grids_->Nakx; int idxRight[naky*nakx]; int idxLeft[naky*nakx]; int linksR[naky*nakx]; int linksL[naky*nakx]; int n_k[naky*nakx]; nClasses = get_nClasses(idxRight, idxLeft, linksR, linksL, n_k, naky, nakx, jtwist); nLinks = (int*) malloc(sizeof(int)*nClasses); nChains = (int*) malloc(sizeof(int)*nClasses); get_nLinks_nChains(nLinks, nChains, n_k, nClasses, naky, nakx); ikxLinked_h = (int**) malloc(sizeof(int*)*nClasses); ikyLinked_h = (int**) malloc(sizeof(int*)*nClasses); for(int c=0; c<nClasses; c++) { ikxLinked_h[c] = (int*) malloc(sizeof(int)*nLinks[c]*nChains[c]); ikyLinked_h[c] = (int*) malloc(sizeof(int)*nLinks[c]*nChains[c]); } kFill(nClasses, nChains, nLinks, ikyLinked_h, ikxLinked_h, linksL, linksR, idxRight, naky, nakx); dG = (dim3*) malloc(sizeof(dim3)*nClasses); dB = (dim3*) malloc(sizeof(dim3)*nClasses); hipHostMalloc ((void**) &zft_plan_forward, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &zft_plan_inverse, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &zft_plan_forward_singlemom, sizeof(hipfftHandle*)*nClasses); // hipHostMalloc ((void**) &zft_plan_inverse_singlemom, 
sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &dz_plan_forward, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &dz_plan_inverse, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &dz_plan_forward_singlemom, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &dz_plan_inverse_singlemom, sizeof(hipfftHandle*)*nClasses); hipHostMalloc ((void**) &abs_dz_plan_forward_singlemom, sizeof(hipfftHandle*)*nClasses); // these are arrays of pointers to device memory hipHostMalloc ((void**) &ikxLinked, sizeof(int*) *nClasses); hipHostMalloc ((void**) &ikyLinked, sizeof(int*) *nClasses); hipHostMalloc ((void**) &G_linked, sizeof(hipComplex*)*nClasses); hipHostMalloc ((void**) &kzLinked, sizeof(float*) *nClasses); // printf("nClasses = %d\n", nClasses); for(int c=0; c<nClasses; c++) { // printf("\tClass %d: nChains = %d, nLinks = %d\n", c, nChains[c], nLinks[c]); // allocate and copy into device memory int nLC = nLinks[c]*nChains[c]; hipMalloc ((void**) &ikxLinked[c], sizeof(int)*nLC); hipMalloc ((void**) &ikyLinked[c], sizeof(int)*nLC); CP_TO_GPU(ikxLinked[c], ikxLinked_h[c], sizeof(int)*nLC); CP_TO_GPU(ikyLinked[c], ikyLinked_h[c], sizeof(int)*nLC); size_t sLClmz = sizeof(hipComplex)*nLC*grids_->Nl*grids_->Nm*grids_->Nz; checkCuda(hipMalloc((void**) &G_linked[c], sLClmz)); hipMemset(G_linked[c], 0., sLClmz); hipMalloc((void**) &kzLinked[c], sizeof(float)*grids_->Nz*nLinks[c]); hipMemset(kzLinked[c], 0., sizeof(float)*grids_->Nz*nLinks[c]); // set up transforms hipfftCreate( &zft_plan_forward[c]); hipfftCreate( &zft_plan_inverse[c]); hipfftCreate( &zft_plan_forward_singlemom[c]); // hipfftCreate( &zft_plan_inverse_singlemom[c]); hipfftCreate( &dz_plan_forward[c]); hipfftCreate( &dz_plan_inverse[c]); hipfftCreate( &dz_plan_forward_singlemom[c]); // hipfftCreate( &dz_plan_inverse_singlemom[c]); hipfftCreate(&abs_dz_plan_forward_singlemom[c]); int size = nLinks[c]*grids_->Nz; size_t workSize; int nClm = nChains[c]*grids_->Nl*grids_->Nm; 
hipfftMakePlanMany(zft_plan_forward[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nClm, &workSize); hipfftMakePlanMany(zft_plan_inverse[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nClm, &workSize); hipfftMakePlanMany(zft_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nChains[c], &workSize); // hipfftMakePlanMany(zft_plan_inverse_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nChains[c], &workSize); hipfftMakePlanMany(dz_plan_forward[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nClm, &workSize); hipfftMakePlanMany(dz_plan_inverse[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nClm, &workSize); hipfftMakePlanMany(dz_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nChains[c], &workSize); hipfftMakePlanMany(dz_plan_inverse_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nChains[c], &workSize); hipfftMakePlanMany(abs_dz_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, HIPFFT_C2C, nChains[c], &workSize); // initialize kzLinked hipLaunchKernelGGL(( init_kzLinked) , dim3(1),dim3(1), 0, 0, kzLinked[c], nLinks[c]); int nn1, nn2, nn3, nt1, nt2, nt3, nb1, nb2, nb3; nn1 = grids_->Nz; nt1 = min( nn1, 32 ); nb1 = 1 + (nn1-1)/nt1; nn2 = nLinks[c]*nChains[c]; nt2 = min( nn2, 4 ); nb2 = 1 + (nn2-1)/nt2; nn3 = grids_->Nmoms; nt3 = min( nn3, 4 ); nb3 = 1 + (nn3-1)/nt3; dB[c] = dim3(nt1, nt2, nt3); dG[c] = dim3(nb1, nb2, nb3); // dB[c] = dim3(32,4,4); // dG[c] = dim3(1 + (grids_->Nz-1)/dB[c].x, // 1 + (nLinks[c]*nChains[c]-1)/dB[c].y, // 1 + (grids_->Nmoms-1)/dB[c].z); } set_callbacks(); // this->linkPrint(); } GradParallelLinked::~GradParallelLinked() { if (nLinks) free(nLinks); if (nChains) free(nChains); if (dB) free(dB); if (dG) free(dG); for(int c=0; c<nClasses; c++) { hipfftDestroy( zft_plan_forward[c] ); hipfftDestroy( zft_plan_inverse[c] ); hipfftDestroy( zft_plan_forward_singlemom[c]); // hipfftDestroy( zft_plan_inverse_singlemom[c]); hipfftDestroy( dz_plan_forward[c] ); 
hipfftDestroy( dz_plan_inverse[c] ); hipfftDestroy( dz_plan_forward_singlemom[c] ); hipfftDestroy( dz_plan_inverse_singlemom[c] ); hipfftDestroy(abs_dz_plan_forward_singlemom[c]); if (ikxLinked_h[c]) free(ikxLinked_h[c]); if (ikyLinked_h[c]) free(ikyLinked_h[c]); if (ikxLinked[c]) hipFree(ikxLinked[c]); if (ikyLinked[c]) hipFree(ikyLinked[c]); if (kzLinked[c]) hipFree(kzLinked[c]); if (G_linked[c]) hipFree(G_linked[c]); } if (zft_plan_forward) hipHostFree( zft_plan_forward); if (zft_plan_inverse) hipHostFree( zft_plan_inverse); if (zft_plan_forward_singlemom) hipHostFree( zft_plan_forward_singlemom); // if (zft_plan_inverse_singlemom) hipHostFree( zft_plan_inverse_singlemom); if (dz_plan_forward) hipHostFree( dz_plan_forward); if (dz_plan_inverse) hipHostFree( dz_plan_inverse); if (dz_plan_forward_singlemom) hipHostFree( dz_plan_forward_singlemom); if (dz_plan_inverse_singlemom) hipHostFree( dz_plan_inverse_singlemom); if (abs_dz_plan_forward_singlemom) hipHostFree(abs_dz_plan_forward_singlemom); if (ikxLinked_h) free(ikxLinked_h); if (ikyLinked_h) free(ikyLinked_h); if (ikxLinked) hipHostFree(ikxLinked); if (ikyLinked) hipHostFree(ikyLinked); if (G_linked) hipHostFree(G_linked); if (kzLinked) hipHostFree(kzLinked); } void GradParallelLinked::zft(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); hipfftExecC2C (zft_plan_forward[c], G_linked[c], G_linked[c], HIPFFT_FORWARD); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } void GradParallelLinked::zft_inverse(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); hipfftExecC2C (zft_plan_inverse[c], G_linked[c], G_linked[c], 
HIPFFT_BACKWARD); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } // for a single moment m void GradParallelLinked::zft(hipComplex* m, hipComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); hipfftExecC2C(zft_plan_forward_singlemom[c], G_linked[c], G_linked[c], HIPFFT_FORWARD); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } /* // for a single moment m void GradParallelLinked::zft_inverse(hipComplex* m, hipComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); hipfftExecC2C(zft_plan_inverse_singlemom[c], G_linked[c], G_linked[c], HIPFFT_BACKWARD); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } */ void GradParallelLinked::dz(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { // each "class" has a different number of links in the chains, and a different number of chains. 
linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); hipfftExecC2C (dz_plan_forward[c], G_linked[c], G_linked[c], HIPFFT_FORWARD); hipfftExecC2C (dz_plan_inverse[c], G_linked[c], G_linked[c], HIPFFT_BACKWARD); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } // for a single moment m void GradParallelLinked::dz(hipComplex* m, hipComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); hipfftExecC2C(dz_plan_forward_singlemom[c], G_linked[c], G_linked[c], HIPFFT_FORWARD); hipfftExecC2C(dz_plan_inverse_singlemom[c], G_linked[c], G_linked[c], HIPFFT_BACKWARD); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } // for a single moment m void GradParallelLinked::abs_dz(hipComplex* m, hipComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); hipfftExecC2C(abs_dz_plan_forward_singlemom[c], G_linked[c], G_linked[c], HIPFFT_FORWARD); hipfftExecC2C( dz_plan_inverse_singlemom[c], G_linked[c], G_linked[c], HIPFFT_BACKWARD); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } int GradParallelLinked::get_nClasses(int *idxRight, int *idxLeft, int *linksR, int *linksL, int *n_k, int naky, int nakx, int jshift0) { int idx0, idxL, idxR; // printf("naky, nakx, jshift0 = %d \t %d \t %d \n",naky, nakx, jshift0); for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { //map indices to kx indices if(idx < (nakx+1)/2 ) { idx0 = idx; } else { idx0 = idx - nakx; } if(idy == 0) 
{ idxL = idx0; idxR = idx0; } else { // signs here are correct according to Mike Beer's thesis idxL = idx0 + idy*jshift0; idxR = idx0 - idy*jshift0; } //remap to usual indices if(idxL >= 0 && idxL < (nakx+1)/2) { idxLeft[idy + naky*idx] = idxL; } else if( idxL+nakx >= (nakx+1)/2 && idxL+nakx < nakx ) { idxLeft[idy + naky*idx] = idxL + nakx; //nshift } else { idxLeft[idy + naky*idx] = -1; } if(idxR >= 0 && idxR < (nakx+1)/2) { idxRight[idy + naky*idx] = idxR; } else if( idxR+nakx >= (nakx+1)/2 && idxR+nakx <nakx ) { idxRight[idy + naky*idx] = idxR + nakx; } else { idxRight[idy + naky*idx] = -1; } } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("idxLeft[%d,%d]= %d ", idy, idx, idxLeft[idy + naky*idx]); } printf("\n"); } for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("idxRight[%d,%d]= %d ", idy, idx, idxRight[idy + naky*idx]); } printf("\n"); } */ for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { //count the links for each region //linksL = number of links to the left of current position linksL[idy + naky*idx] = 0; int idx_star = idx; while(idx_star != idxLeft[idy + naky*idx_star] && idxLeft[idy + naky*(idx_star)] >= 0) { //increment left links counter, and move to next link to left //until idx of link to left is negative or same as current idx linksL[idy + naky*idx]++; idx_star = idxLeft[idy + naky*(idx_star)]; } //linksR = number of links to the right linksR[idy + naky*idx] = 0; idx_star = idx; while(idx_star != idxRight[idy + naky*idx_star] && idxRight[idy + naky*idx_star] >= 0) { linksR[idy + naky*idx]++; idx_star = idxRight[idy + naky*idx_star]; } } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("linksL[%d,%d]= %d ", idy, idx, linksL[idy + naky*idx]); } printf("\n"); } for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("linksR[%d,%d]= %d ", idy, idx, linksR[idy + naky*idx]); } printf("\n"); } */ //now we set up class array 
//nClasses = # of classes //first count number of links for each (kx,ky) int k = 0; for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { n_k[k] = 1 + linksL[idy + naky*idx] + linksR[idy + naky*idx]; k++; } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("nLinks[%d,%d]= %d ", idy, idx, n_k[idy+naky*idx]); } printf("\n"); } */ //count how many unique values of n_k there are, which is the number of classes //sort... qsort(n_k, naky*nakx, sizeof(int), compare); //then count int nClasses = 1; for(int k=0; k<naky*nakx-1; k++) { if(n_k[k] != n_k[k+1]) nClasses= nClasses + 1; } return nClasses; } void GradParallelLinked::get_nLinks_nChains(int *nLinks, int *nChains, int *n_k, int nClasses, int naky, int nakx) { for(int c=0; c<nClasses; c++) { nChains[c] = 1; nLinks[c] = 0; } //fill the nChains and nLinks arrays int c = 0; for(int k=1; k<naky*nakx; k++) { if(n_k[k] == n_k[k-1]) nChains[c]++; else { nLinks[c] = n_k[k-1]; nChains[c] = nChains[c]/nLinks[c]; c++; } } c = nClasses-1; nLinks[c] = n_k[naky*nakx-1]; nChains[c] = nChains[c]/nLinks[c]; } void kt2ki(int idy, int idx, int *c, int *p, int* linksL, int* linksR, int nClasses, int* nLinks, int naky) { //get nLinks in the current chain int np_k = 1 + linksL[idy + naky*idx] + linksR[idy + naky*idx]; //find which class corresponds to this nLinks for(int i=0; i<nClasses; i++) { if(nLinks[i] == np_k) { *c= i; break; } } *p = linksL[idy + naky*idx]; } void fill(int *ky, int *kx, int idy, int idx, int *idxRight, int c, int p, int n, int naky, int nakx, int nshift, int nLinks) { int idx0; if(idx < (nakx+1)/2) idx0=idx; else idx0=idx+nshift; ky[p+nLinks*n] = idy; kx[p+nLinks*n] = idx0; int idxR=idx; for(p=1; p<nLinks; p++) { idxR = idxRight[idy + naky*idxR]; ky[p + nLinks*n] = idy; if(idxR < (nakx+1)/2) { kx[p + nLinks*n] = idxR; } else { kx[p + nLinks*n] = idxR+nshift; } } } void GradParallelLinked::kFill(int nClasses, int *nChains, int *nLinks, int **ky, int **kx, int *linksL, int 
*linksR, int *idxRight, int naky, int nakx) { int nshift = grids_->Nx-nakx; //fill the kx and ky arrays for(int ic=0; ic<nClasses; ic++) { int n = 0; int p, c; for(int idy=0; idy<naky; idy++) { for(int idx=0; idx<nakx; idx++) { kt2ki(idy, idx, &c, &p, linksL, linksR, nClasses, nLinks, naky); if(c==ic) { if(p==0) { fill(ky[c], kx[c], idy, idx, idxRight, c, p, n, naky, nakx, nshift, nLinks[c]); n++; } } } } } } void GradParallelLinked::linkPrint() { printf("Printing links...\n"); for(int c=0; c<nClasses; c++) { for(int n=0; n<nChains[c]; n++) { for(int p=0; p<nLinks[c]; p++) { if(ikxLinked_h[c][p+nLinks[c]*n]<(grids_->Nx-1)/3+1) { printf("(%d,%d) ", ikyLinked_h[c][p+nLinks[c]*n], ikxLinked_h[c][p+nLinks[c]*n]); } else { printf("(%d,%d) ",ikyLinked_h[c][p+nLinks[c]*n], ikxLinked_h[c][p+nLinks[c]*n]-grids_->Nx); } if(ikxLinked_h[c][p+nLinks[c]*n]>(grids_->Nx-1)/3 && ikxLinked_h[c][p+nLinks[c]*n]<2*(grids_->Nx/3)+1) { printf("->DEALIASING ERROR"); } /* *counter= *counter+1; */ } printf("\n"); } printf("\n\n"); } } void GradParallelLinked::set_callbacks() { for(int c=0; c<nClasses; c++) { // set up callback functions hipDeviceSynchronize(); cufftXtSetCallback( zft_plan_forward[c], (void**) &zfts_Linked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback( dz_plan_forward[c], (void**) &i_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback( dz_plan_forward_singlemom[c], (void**) &i_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback(abs_dz_plan_forward_singlemom[c], (void**) &abs_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); hipDeviceSynchronize(); checkCuda(hipGetLastError()); } } void GradParallelLinked::clear_callbacks() { for(int c=0; c<nClasses; c++) { // set up callback functions hipDeviceSynchronize(); cufftXtClearCallback( zft_plan_inverse[c], CUFFT_CB_ST_COMPLEX); // cufftXtClearCallback( zft_plan_inverse_singlemom[c], CUFFT_CB_ST_COMPLEX); 
cufftXtClearCallback( dz_plan_forward[c], CUFFT_CB_ST_COMPLEX); cufftXtClearCallback( dz_plan_forward_singlemom[c], CUFFT_CB_ST_COMPLEX); cufftXtClearCallback(abs_dz_plan_forward_singlemom[c], CUFFT_CB_ST_COMPLEX); hipDeviceSynchronize(); checkCuda(hipGetLastError()); } }
b2c08705b95f73cdc49611dd432ebf3fbfbf1001.cu
#include "grad_parallel.h" #include <stdlib.h> #include "get_error.h" #define GCHAINS <<< dG[c], dB[c] >>> GradParallelLinked::GradParallelLinked(Grids* grids, int jtwist) : grids_(grids) { nLinks = nullptr; nChains = nullptr; ikxLinked_h = nullptr; ikyLinked_h = nullptr; ikxLinked = nullptr; ikyLinked = nullptr; kzLinked = nullptr; G_linked = nullptr; dG = nullptr; dB = nullptr; zft_plan_forward = nullptr; zft_plan_inverse = nullptr; zft_plan_forward_singlemom = nullptr; // zft_plan_inverse_singlemom = nullptr; dz_plan_forward = nullptr; dz_plan_inverse = nullptr; dz_plan_forward_singlemom = nullptr; dz_plan_inverse_singlemom = nullptr; abs_dz_plan_forward_singlemom = nullptr; int naky = grids_->Naky; int nakx = grids_->Nakx; int idxRight[naky*nakx]; int idxLeft[naky*nakx]; int linksR[naky*nakx]; int linksL[naky*nakx]; int n_k[naky*nakx]; nClasses = get_nClasses(idxRight, idxLeft, linksR, linksL, n_k, naky, nakx, jtwist); nLinks = (int*) malloc(sizeof(int)*nClasses); nChains = (int*) malloc(sizeof(int)*nClasses); get_nLinks_nChains(nLinks, nChains, n_k, nClasses, naky, nakx); ikxLinked_h = (int**) malloc(sizeof(int*)*nClasses); ikyLinked_h = (int**) malloc(sizeof(int*)*nClasses); for(int c=0; c<nClasses; c++) { ikxLinked_h[c] = (int*) malloc(sizeof(int)*nLinks[c]*nChains[c]); ikyLinked_h[c] = (int*) malloc(sizeof(int)*nLinks[c]*nChains[c]); } kFill(nClasses, nChains, nLinks, ikyLinked_h, ikxLinked_h, linksL, linksR, idxRight, naky, nakx); dG = (dim3*) malloc(sizeof(dim3)*nClasses); dB = (dim3*) malloc(sizeof(dim3)*nClasses); cudaMallocHost ((void**) &zft_plan_forward, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &zft_plan_inverse, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &zft_plan_forward_singlemom, sizeof(cufftHandle*)*nClasses); // cudaMallocHost ((void**) &zft_plan_inverse_singlemom, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &dz_plan_forward, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &dz_plan_inverse, 
sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &dz_plan_forward_singlemom, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &dz_plan_inverse_singlemom, sizeof(cufftHandle*)*nClasses); cudaMallocHost ((void**) &abs_dz_plan_forward_singlemom, sizeof(cufftHandle*)*nClasses); // these are arrays of pointers to device memory cudaMallocHost ((void**) &ikxLinked, sizeof(int*) *nClasses); cudaMallocHost ((void**) &ikyLinked, sizeof(int*) *nClasses); cudaMallocHost ((void**) &G_linked, sizeof(cuComplex*)*nClasses); cudaMallocHost ((void**) &kzLinked, sizeof(float*) *nClasses); // printf("nClasses = %d\n", nClasses); for(int c=0; c<nClasses; c++) { // printf("\tClass %d: nChains = %d, nLinks = %d\n", c, nChains[c], nLinks[c]); // allocate and copy into device memory int nLC = nLinks[c]*nChains[c]; cudaMalloc ((void**) &ikxLinked[c], sizeof(int)*nLC); cudaMalloc ((void**) &ikyLinked[c], sizeof(int)*nLC); CP_TO_GPU(ikxLinked[c], ikxLinked_h[c], sizeof(int)*nLC); CP_TO_GPU(ikyLinked[c], ikyLinked_h[c], sizeof(int)*nLC); size_t sLClmz = sizeof(cuComplex)*nLC*grids_->Nl*grids_->Nm*grids_->Nz; checkCuda(cudaMalloc((void**) &G_linked[c], sLClmz)); cudaMemset(G_linked[c], 0., sLClmz); cudaMalloc((void**) &kzLinked[c], sizeof(float)*grids_->Nz*nLinks[c]); cudaMemset(kzLinked[c], 0., sizeof(float)*grids_->Nz*nLinks[c]); // set up transforms cufftCreate( &zft_plan_forward[c]); cufftCreate( &zft_plan_inverse[c]); cufftCreate( &zft_plan_forward_singlemom[c]); // cufftCreate( &zft_plan_inverse_singlemom[c]); cufftCreate( &dz_plan_forward[c]); cufftCreate( &dz_plan_inverse[c]); cufftCreate( &dz_plan_forward_singlemom[c]); // cufftCreate( &dz_plan_inverse_singlemom[c]); cufftCreate(&abs_dz_plan_forward_singlemom[c]); int size = nLinks[c]*grids_->Nz; size_t workSize; int nClm = nChains[c]*grids_->Nl*grids_->Nm; cufftMakePlanMany(zft_plan_forward[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nClm, &workSize); cufftMakePlanMany(zft_plan_inverse[c], 1, &size, NULL, 1, 0, 
NULL, 1, 0, CUFFT_C2C, nClm, &workSize); cufftMakePlanMany(zft_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nChains[c], &workSize); // cufftMakePlanMany(zft_plan_inverse_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nChains[c], &workSize); cufftMakePlanMany(dz_plan_forward[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nClm, &workSize); cufftMakePlanMany(dz_plan_inverse[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nClm, &workSize); cufftMakePlanMany(dz_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nChains[c], &workSize); cufftMakePlanMany(dz_plan_inverse_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nChains[c], &workSize); cufftMakePlanMany(abs_dz_plan_forward_singlemom[c], 1, &size, NULL, 1, 0, NULL, 1, 0, CUFFT_C2C, nChains[c], &workSize); // initialize kzLinked init_kzLinked <<<1,1>>> (kzLinked[c], nLinks[c]); int nn1, nn2, nn3, nt1, nt2, nt3, nb1, nb2, nb3; nn1 = grids_->Nz; nt1 = min( nn1, 32 ); nb1 = 1 + (nn1-1)/nt1; nn2 = nLinks[c]*nChains[c]; nt2 = min( nn2, 4 ); nb2 = 1 + (nn2-1)/nt2; nn3 = grids_->Nmoms; nt3 = min( nn3, 4 ); nb3 = 1 + (nn3-1)/nt3; dB[c] = dim3(nt1, nt2, nt3); dG[c] = dim3(nb1, nb2, nb3); // dB[c] = dim3(32,4,4); // dG[c] = dim3(1 + (grids_->Nz-1)/dB[c].x, // 1 + (nLinks[c]*nChains[c]-1)/dB[c].y, // 1 + (grids_->Nmoms-1)/dB[c].z); } set_callbacks(); // this->linkPrint(); } GradParallelLinked::~GradParallelLinked() { if (nLinks) free(nLinks); if (nChains) free(nChains); if (dB) free(dB); if (dG) free(dG); for(int c=0; c<nClasses; c++) { cufftDestroy( zft_plan_forward[c] ); cufftDestroy( zft_plan_inverse[c] ); cufftDestroy( zft_plan_forward_singlemom[c]); // cufftDestroy( zft_plan_inverse_singlemom[c]); cufftDestroy( dz_plan_forward[c] ); cufftDestroy( dz_plan_inverse[c] ); cufftDestroy( dz_plan_forward_singlemom[c] ); cufftDestroy( dz_plan_inverse_singlemom[c] ); cufftDestroy(abs_dz_plan_forward_singlemom[c]); if (ikxLinked_h[c]) free(ikxLinked_h[c]); if 
(ikyLinked_h[c]) free(ikyLinked_h[c]); if (ikxLinked[c]) cudaFree(ikxLinked[c]); if (ikyLinked[c]) cudaFree(ikyLinked[c]); if (kzLinked[c]) cudaFree(kzLinked[c]); if (G_linked[c]) cudaFree(G_linked[c]); } if (zft_plan_forward) cudaFreeHost( zft_plan_forward); if (zft_plan_inverse) cudaFreeHost( zft_plan_inverse); if (zft_plan_forward_singlemom) cudaFreeHost( zft_plan_forward_singlemom); // if (zft_plan_inverse_singlemom) cudaFreeHost( zft_plan_inverse_singlemom); if (dz_plan_forward) cudaFreeHost( dz_plan_forward); if (dz_plan_inverse) cudaFreeHost( dz_plan_inverse); if (dz_plan_forward_singlemom) cudaFreeHost( dz_plan_forward_singlemom); if (dz_plan_inverse_singlemom) cudaFreeHost( dz_plan_inverse_singlemom); if (abs_dz_plan_forward_singlemom) cudaFreeHost(abs_dz_plan_forward_singlemom); if (ikxLinked_h) free(ikxLinked_h); if (ikyLinked_h) free(ikyLinked_h); if (ikxLinked) cudaFreeHost(ikxLinked); if (ikyLinked) cudaFreeHost(ikyLinked); if (G_linked) cudaFreeHost(G_linked); if (kzLinked) cudaFreeHost(kzLinked); } void GradParallelLinked::zft(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); cufftExecC2C (zft_plan_forward[c], G_linked[c], G_linked[c], CUFFT_FORWARD); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } void GradParallelLinked::zft_inverse(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); cufftExecC2C (zft_plan_inverse[c], G_linked[c], G_linked[c], CUFFT_INVERSE); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } // for a single moment m void GradParallelLinked::zft(cuComplex* m, cuComplex* res) { 
int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); cufftExecC2C(zft_plan_forward_singlemom[c], G_linked[c], G_linked[c], CUFFT_FORWARD); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } /* // for a single moment m void GradParallelLinked::zft_inverse(cuComplex* m, cuComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); cufftExecC2C(zft_plan_inverse_singlemom[c], G_linked[c], G_linked[c], CUFFT_INVERSE); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } */ void GradParallelLinked::dz(MomentsG* G) { for (int is=0; is < grids_->Nspecies; is++) { for(int c=0; c<nClasses; c++) { // each "class" has a different number of links in the chains, and a different number of chains. 
linkedCopy GCHAINS (G->G(0,0,is), G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); cufftExecC2C (dz_plan_forward[c], G_linked[c], G_linked[c], CUFFT_FORWARD); cufftExecC2C (dz_plan_inverse[c], G_linked[c], G_linked[c], CUFFT_INVERSE); linkedCopyBack GCHAINS (G_linked[c], G->G(0,0,is), nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], grids_->Nmoms); } } } // for a single moment m void GradParallelLinked::dz(cuComplex* m, cuComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); cufftExecC2C(dz_plan_forward_singlemom[c], G_linked[c], G_linked[c], CUFFT_FORWARD); cufftExecC2C(dz_plan_inverse_singlemom[c], G_linked[c], G_linked[c], CUFFT_INVERSE); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } // for a single moment m void GradParallelLinked::abs_dz(cuComplex* m, cuComplex* res) { int nMoms=1; for(int c=0; c<nClasses; c++) { // these only use the G(0,0) part of G_linked linkedCopy GCHAINS (m, G_linked[c], nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); cufftExecC2C(abs_dz_plan_forward_singlemom[c], G_linked[c], G_linked[c], CUFFT_FORWARD); cufftExecC2C( dz_plan_inverse_singlemom[c], G_linked[c], G_linked[c], CUFFT_INVERSE); linkedCopyBack GCHAINS (G_linked[c], res, nLinks[c], nChains[c], ikxLinked[c], ikyLinked[c], nMoms); } } int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } int GradParallelLinked::get_nClasses(int *idxRight, int *idxLeft, int *linksR, int *linksL, int *n_k, int naky, int nakx, int jshift0) { int idx0, idxL, idxR; // printf("naky, nakx, jshift0 = %d \t %d \t %d \n",naky, nakx, jshift0); for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { //map indices to kx indices if(idx < (nakx+1)/2 ) { idx0 = idx; } else { idx0 = idx - nakx; } if(idy == 0) { idxL = idx0; idxR 
= idx0; } else { // signs here are correct according to Mike Beer's thesis idxL = idx0 + idy*jshift0; idxR = idx0 - idy*jshift0; } //remap to usual indices if(idxL >= 0 && idxL < (nakx+1)/2) { idxLeft[idy + naky*idx] = idxL; } else if( idxL+nakx >= (nakx+1)/2 && idxL+nakx < nakx ) { idxLeft[idy + naky*idx] = idxL + nakx; //nshift } else { idxLeft[idy + naky*idx] = -1; } if(idxR >= 0 && idxR < (nakx+1)/2) { idxRight[idy + naky*idx] = idxR; } else if( idxR+nakx >= (nakx+1)/2 && idxR+nakx <nakx ) { idxRight[idy + naky*idx] = idxR + nakx; } else { idxRight[idy + naky*idx] = -1; } } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("idxLeft[%d,%d]= %d ", idy, idx, idxLeft[idy + naky*idx]); } printf("\n"); } for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("idxRight[%d,%d]= %d ", idy, idx, idxRight[idy + naky*idx]); } printf("\n"); } */ for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { //count the links for each region //linksL = number of links to the left of current position linksL[idy + naky*idx] = 0; int idx_star = idx; while(idx_star != idxLeft[idy + naky*idx_star] && idxLeft[idy + naky*(idx_star)] >= 0) { //increment left links counter, and move to next link to left //until idx of link to left is negative or same as current idx linksL[idy + naky*idx]++; idx_star = idxLeft[idy + naky*(idx_star)]; } //linksR = number of links to the right linksR[idy + naky*idx] = 0; idx_star = idx; while(idx_star != idxRight[idy + naky*idx_star] && idxRight[idy + naky*idx_star] >= 0) { linksR[idy + naky*idx]++; idx_star = idxRight[idy + naky*idx_star]; } } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("linksL[%d,%d]= %d ", idy, idx, linksL[idy + naky*idx]); } printf("\n"); } for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("linksR[%d,%d]= %d ", idy, idx, linksR[idy + naky*idx]); } printf("\n"); } */ //now we set up class array //nClasses = # of classes 
//first count number of links for each (kx,ky) int k = 0; for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { n_k[k] = 1 + linksL[idy + naky*idx] + linksR[idy + naky*idx]; k++; } } /* for(int idx=0; idx<nakx; idx++) { for(int idy=0; idy<naky; idy++) { printf("nLinks[%d,%d]= %d ", idy, idx, n_k[idy+naky*idx]); } printf("\n"); } */ //count how many unique values of n_k there are, which is the number of classes //sort... qsort(n_k, naky*nakx, sizeof(int), compare); //then count int nClasses = 1; for(int k=0; k<naky*nakx-1; k++) { if(n_k[k] != n_k[k+1]) nClasses= nClasses + 1; } return nClasses; } void GradParallelLinked::get_nLinks_nChains(int *nLinks, int *nChains, int *n_k, int nClasses, int naky, int nakx) { for(int c=0; c<nClasses; c++) { nChains[c] = 1; nLinks[c] = 0; } //fill the nChains and nLinks arrays int c = 0; for(int k=1; k<naky*nakx; k++) { if(n_k[k] == n_k[k-1]) nChains[c]++; else { nLinks[c] = n_k[k-1]; nChains[c] = nChains[c]/nLinks[c]; c++; } } c = nClasses-1; nLinks[c] = n_k[naky*nakx-1]; nChains[c] = nChains[c]/nLinks[c]; } void kt2ki(int idy, int idx, int *c, int *p, int* linksL, int* linksR, int nClasses, int* nLinks, int naky) { //get nLinks in the current chain int np_k = 1 + linksL[idy + naky*idx] + linksR[idy + naky*idx]; //find which class corresponds to this nLinks for(int i=0; i<nClasses; i++) { if(nLinks[i] == np_k) { *c= i; break; } } *p = linksL[idy + naky*idx]; } void fill(int *ky, int *kx, int idy, int idx, int *idxRight, int c, int p, int n, int naky, int nakx, int nshift, int nLinks) { int idx0; if(idx < (nakx+1)/2) idx0=idx; else idx0=idx+nshift; ky[p+nLinks*n] = idy; kx[p+nLinks*n] = idx0; int idxR=idx; for(p=1; p<nLinks; p++) { idxR = idxRight[idy + naky*idxR]; ky[p + nLinks*n] = idy; if(idxR < (nakx+1)/2) { kx[p + nLinks*n] = idxR; } else { kx[p + nLinks*n] = idxR+nshift; } } } void GradParallelLinked::kFill(int nClasses, int *nChains, int *nLinks, int **ky, int **kx, int *linksL, int *linksR, int *idxRight, int 
naky, int nakx) { int nshift = grids_->Nx-nakx; //fill the kx and ky arrays for(int ic=0; ic<nClasses; ic++) { int n = 0; int p, c; for(int idy=0; idy<naky; idy++) { for(int idx=0; idx<nakx; idx++) { kt2ki(idy, idx, &c, &p, linksL, linksR, nClasses, nLinks, naky); if(c==ic) { if(p==0) { fill(ky[c], kx[c], idy, idx, idxRight, c, p, n, naky, nakx, nshift, nLinks[c]); n++; } } } } } } void GradParallelLinked::linkPrint() { printf("Printing links...\n"); for(int c=0; c<nClasses; c++) { for(int n=0; n<nChains[c]; n++) { for(int p=0; p<nLinks[c]; p++) { if(ikxLinked_h[c][p+nLinks[c]*n]<(grids_->Nx-1)/3+1) { printf("(%d,%d) ", ikyLinked_h[c][p+nLinks[c]*n], ikxLinked_h[c][p+nLinks[c]*n]); } else { printf("(%d,%d) ",ikyLinked_h[c][p+nLinks[c]*n], ikxLinked_h[c][p+nLinks[c]*n]-grids_->Nx); } if(ikxLinked_h[c][p+nLinks[c]*n]>(grids_->Nx-1)/3 && ikxLinked_h[c][p+nLinks[c]*n]<2*(grids_->Nx/3)+1) { printf("->DEALIASING ERROR"); } /* *counter= *counter+1; */ } printf("\n"); } printf("\n\n"); } } void GradParallelLinked::set_callbacks() { for(int c=0; c<nClasses; c++) { // set up callback functions cudaDeviceSynchronize(); cufftXtSetCallback( zft_plan_forward[c], (void**) &zfts_Linked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback( dz_plan_forward[c], (void**) &i_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback( dz_plan_forward_singlemom[c], (void**) &i_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cufftXtSetCallback(abs_dz_plan_forward_singlemom[c], (void**) &abs_kzLinked_callbackPtr, CUFFT_CB_ST_COMPLEX, (void**)&kzLinked[c]); cudaDeviceSynchronize(); checkCuda(cudaGetLastError()); } } void GradParallelLinked::clear_callbacks() { for(int c=0; c<nClasses; c++) { // set up callback functions cudaDeviceSynchronize(); cufftXtClearCallback( zft_plan_inverse[c], CUFFT_CB_ST_COMPLEX); // cufftXtClearCallback( zft_plan_inverse_singlemom[c], CUFFT_CB_ST_COMPLEX); cufftXtClearCallback( 
dz_plan_forward[c], CUFFT_CB_ST_COMPLEX); cufftXtClearCallback( dz_plan_forward_singlemom[c], CUFFT_CB_ST_COMPLEX); cufftXtClearCallback(abs_dz_plan_forward_singlemom[c], CUFFT_CB_ST_COMPLEX); cudaDeviceSynchronize(); checkCuda(cudaGetLastError()); } }
ffdb458591302fb5cb7184bf5d0c8eb880e16990.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string.h> #include <ctype.h> #include "hash.h" //this is the only way that makes it work????? #include "host.h" /* * 2 copies, one used to retrieve string from hash on host * other is for computations on device */ __device__ static const char *dtable_letters = "hijklmnopqrstuvwxyzabcdefg"; __device__ static const char *dtable_digits = "2345678901"; static const char *table_letters = "hijklmnopqrstuvwxyzabcdefg"; static const char *table_digits = "2345678901"; __global__ void test_hash_host(uint32_t shift, unsigned int *ret_n, uint32_t *ret_arr, size_t letters, char *host) { uint32_t hash_tested, hash_string; uint32_t hash_digits, hash_letters; size_t i; int temp; hash_digits = hash_letters = hash_tested = shift + blockIdx.x * blockDim.x + threadIdx.x; hash_string = FNV1_32_INIT; ROTATE32_LEFTN(hash_digits, letters); for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(host[i] >= 'a' && host[i] <= 'z') { temp = host[i] - 'a'; temp -= hash_letters % 26; if(temp < 0) { temp += 26; } hash_string = fnv_hash_streamone(hash_string, dtable_letters[temp]); ROTATE32_LEFT(hash_letters); } else if(host[i] >= '0' && host[i] <= '9') { temp = host[i] - '0'; temp -= hash_digits % 10; if(temp < 0) { temp += 10; } hash_string = fnv_hash_streamone(hash_string, dtable_digits[temp]); } ROTATE32_LEFT(hash_digits); } if(host[i] == '.') { hash_string = fnv_hash_streamend(hash_string, host + i); } if(hash_string == hash_tested) { atomicMin(ret_n, 128); ret_arr[atomicAdd(ret_n, 1)] = hash_string; } } bool is_valid_host(char *host) { for(; *host != '\0'; host++) { if(!(isdigit(*host) || islower(*host) || *host == '-' || *host == '.')) { return false; } } return true; } size_t count_letters(char *host) { size_t chars; for(chars = 0; *host != '\0' && *host != '.'; host++) { if(islower(*host)) { chars++; } } return chars; } void host_from_hash(uint32_t hash, char *host) { size_t i; int temp; 
for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(islower(host[i])) { temp = host[i] - 'a'; temp -= hash % 26; if(temp < 0) { temp += 26; } //we just assume it uses 64bit values during calculations host[i] = table_letters[temp]; ROTATE32_LEFT(hash); } } for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(isdigit(host[i])) { temp = host[i] - '0'; temp -= hash % 10; if(temp < 0) { temp += 10; } host[i] = table_digits[temp]; } ROTATE32_LEFT(hash); } }
ffdb458591302fb5cb7184bf5d0c8eb880e16990.cu
#include <string.h> #include <ctype.h> #include "hash.h" //this is the only way that makes it work????? #include "host.h" /* * 2 copies, one used to retrieve string from hash on host * other is for computations on device */ __device__ static const char *dtable_letters = "hijklmnopqrstuvwxyzabcdefg"; __device__ static const char *dtable_digits = "2345678901"; static const char *table_letters = "hijklmnopqrstuvwxyzabcdefg"; static const char *table_digits = "2345678901"; __global__ void test_hash_host(uint32_t shift, unsigned int *ret_n, uint32_t *ret_arr, size_t letters, char *host) { uint32_t hash_tested, hash_string; uint32_t hash_digits, hash_letters; size_t i; int temp; hash_digits = hash_letters = hash_tested = shift + blockIdx.x * blockDim.x + threadIdx.x; hash_string = FNV1_32_INIT; ROTATE32_LEFTN(hash_digits, letters); for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(host[i] >= 'a' && host[i] <= 'z') { temp = host[i] - 'a'; temp -= hash_letters % 26; if(temp < 0) { temp += 26; } hash_string = fnv_hash_streamone(hash_string, dtable_letters[temp]); ROTATE32_LEFT(hash_letters); } else if(host[i] >= '0' && host[i] <= '9') { temp = host[i] - '0'; temp -= hash_digits % 10; if(temp < 0) { temp += 10; } hash_string = fnv_hash_streamone(hash_string, dtable_digits[temp]); } ROTATE32_LEFT(hash_digits); } if(host[i] == '.') { hash_string = fnv_hash_streamend(hash_string, host + i); } if(hash_string == hash_tested) { atomicMin(ret_n, 128); ret_arr[atomicAdd(ret_n, 1)] = hash_string; } } bool is_valid_host(char *host) { for(; *host != '\0'; host++) { if(!(isdigit(*host) || islower(*host) || *host == '-' || *host == '.')) { return false; } } return true; } size_t count_letters(char *host) { size_t chars; for(chars = 0; *host != '\0' && *host != '.'; host++) { if(islower(*host)) { chars++; } } return chars; } void host_from_hash(uint32_t hash, char *host) { size_t i; int temp; for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(islower(host[i])) { temp = host[i] 
- 'a'; temp -= hash % 26; if(temp < 0) { temp += 26; } //we just assume it uses 64bit values during calculations host[i] = table_letters[temp]; ROTATE32_LEFT(hash); } } for(i = 0; host[i] != '\0' && host[i] != '.'; i++) { if(isdigit(host[i])) { temp = host[i] - '0'; temp -= hash % 10; if(temp < 0) { temp += 10; } host[i] = table_digits[temp]; } ROTATE32_LEFT(hash); } }
c4e57461a829d0e11f6e855d260695b1b35368da.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/DistributionTemplates.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THH/THHGeneral.h> #include <THH/THHApply.cuh> #include <THH/THHDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in hiprand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state) * in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { hiprandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(hiprandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { return hiprand_uniform(&state); } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter; iter.add_output(ret); iter.add_input(count); iter.add_input(prob); iter.build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (hiprandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return hiprand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return hiprand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter; iter.add_output(ret); iter.add_input(gamma); iter.add_input(gamma_sum); iter.build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter; iter.add_output(ret); iter.add_input(self); iter.add_input(output); iter.build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter; iter.add_output(ret); iter.add_input(x); iter.add_input(alpha); iter.add_input(total); iter.build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
c4e57461a829d0e11f6e855d260695b1b35368da.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/CUDAGeneratorImpl.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/DistributionTemplates.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <THC/THCGeneral.h> #include <THC/THCApply.cuh> #include <THC/THCDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in curand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) * in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using curand distributions that utilize curand4 call. curand4 call doesn't have the * register spilling problem. 
*/ namespace { template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda)); }); } struct curand_uniform_wrapper { curandStatePhilox4_32_10_t &state; __device__ curand_uniform_wrapper(curandStatePhilox4_32_10_t &state): state(state) {} __device__ float operator()() { return curand_uniform(&state); } }; template <typename scalar_t> void binomial_cuda_kernel( at::Tensor& ret, const at::Tensor& count, const at::Tensor& prob, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::TensorIterator iter; iter.add_output(ret); iter.add_input(count); iter.add_input(prob); iter.build(); at::native::distribution_binary_kernel(iter, seeds, [seeds] GPU_LAMBDA (curandStatePhilox4_32_10_t& state, scalar_t count, scalar_t prob) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) auto uniform_lambda = curand_uniform_wrapper(state); BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto sample = sample_binomial<scalar_t, accscalar_t, decltype(uniform_lambda)>(count, prob, standard_uniform); return static_cast<scalar_t>(sample); #else return count; // useless. 
#endif } ); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return curand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return curand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true); at::TensorIterator iter; iter.add_output(ret); iter.add_input(gamma); iter.add_input(gamma_sum); iter.build(); at::native::gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gamma, scalar_t gamma_sum) { auto ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; return ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(20); } Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, rng_engine_inputs); }); return ret; } Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(42); } Tensor ret = at::empty(count.sizes(), count.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "binomial_cuda", [&] { binomial_cuda_kernel<scalar_t>(ret, count, prob, rng_engine_inputs); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, rng_engine_inputs); }); return 
ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional<Generator> gen_) { auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator()); std::pair<uint64_t, uint64_t> rng_engine_inputs; { // See Note [Acquire lock when using random generators] std::lock_guard<std::mutex> lock(gen->mutex_); rng_engine_inputs = gen->philox_engine_inputs(10); } Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, rng_engine_inputs); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); TensorIterator iter; iter.add_output(ret); iter.add_input(self); iter.add_input(output); iter.build(); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "_standard_gamma_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t self_val, scalar_t output_val) { return standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); }); return ret; } Tensor _dirichlet_grad_cuda(const Tensor& x, const Tensor& alpha, const Tensor& total) { Tensor ret = at::empty(x.sizes(), x.options()); TensorIterator iter; iter.add_output(ret); iter.add_input(x); iter.add_input(alpha); iter.add_input(total); iter.build(); AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "_dirichlet_grad_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t x_val, scalar_t alpha_val, scalar_t total_val) -> scalar_t { return dirichlet_grad_one<scalar_t, accscalar_t>(x_val, alpha_val, total_val); }); }); return ret; } }} // namespace at::native
abba553fc4a8d4581a5357f17e28581f31a92507.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/accuracy_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AccuracyForwardGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; int num_better_predictions = -1; // true_class also counts as "better" if (has_ignore_label_ && label_value == ignore_label_) { acc[index] = 0; counts[index] = 0; } else { for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[index] = (num_better_predictions < top_k); counts[index] = 1; } } } template <typename Dtype> __global__ void AccuracyForwardWithPerClassGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, Dtype* counts, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { // nothing to be done. 
} else { int num_better_predictions = -1; // true_class also counts as "better" for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[label_value*nthreads + index] += (num_better_predictions < top_k); counts[label_value*nthreads + index] = 1; } } } template <typename Dtype> void AccuracyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_label = bottom[1]->gpu_data(); const int dim = bottom[0]->count() / outer_num_; const int num_labels = bottom[0]->shape(label_axis_); const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. Dtype* acc_data = bottom[0]->mutable_gpu_diff(); if (top.size() == 1) { // simple case - report only global accuracy. // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AccuracyForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_, counts); Dtype acc; caffe_gpu_asum(nthreads, acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nthreads, counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } } else { // need to report per-class accuracy as well // allocate space for more detailed "counts" nums_buffer_.ReshapeLike(*bottom[0]); Dtype* counts = nums_buffer_.mutable_gpu_data(); caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AccuracyForwardWithPerClassGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_); // get the overall accuracy Dtype acc; caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } // get per-class accuracy Dtype* per_class_acc = top[1]->mutable_cpu_data(); for (int l = 0; l < num_labels; l++) { caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); if (valid_count > 0) { per_class_acc[l] /= valid_count; } else { per_class_acc[l] = 0; } } } // Clear scratch memory to prevent interfering with backward (see #6202). 
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template <typename Dtype> void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { NOT_IMPLEMENTED; } } INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); } // namespace caffe
abba553fc4a8d4581a5357f17e28581f31a92507.cu
#include <vector>

#include "caffe/layers/accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// For each (item, spatial) position, decides whether the true class is among
// the top_k highest-scoring predictions.
// Launch: one thread per position (nthreads = outer_num_ * inner_num_).
// Outputs (one entry per position):
//   acc[index]    <- 1 if correct within top_k, else 0
//   counts[index] <- 1 if the position is counted, 0 if it carries the ignore label
template <typename Dtype>
__global__ void AccuracyForwardGPU(const int nthreads,
          const Dtype* bottom_data, const Dtype* label, Dtype* acc,
          const int num, const int dim, const int spatial_dim,
          const int num_labels, const int top_k,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      acc[index] = 0;
      counts[index] = 0;
    } else {
      // Fix: read the true-class score only AFTER the ignore check. The
      // original computed this before the check, so an ignore_label outside
      // [0, num_labels) (commonly -1) caused an out-of-bounds global read.
      const Dtype prob_of_true_class =
          bottom_data[n * dim + label_value * spatial_dim + s];
      int num_better_predictions = -1;  // true_class also counts as "better"
      // Fix: logical && instead of bitwise & (same truth table here, but &&
      // states the intent and short-circuits once top_k is exceeded).
      for (int k = 0; k < num_labels && num_better_predictions < top_k; k++) {
        num_better_predictions +=
            (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class);
      }
      acc[index] = (num_better_predictions < top_k);
      counts[index] = 1;
    }
  }
}

// Same top-k test as AccuracyForwardGPU, but results are scattered per class:
//   acc[label_value * nthreads + index]    += hit (0/1)
//   counts[label_value * nthreads + index]  = 1
// Both output buffers must be zero-initialized by the caller and hold
// num_labels * nthreads entries.
template <typename Dtype>
__global__ void AccuracyForwardWithPerClassGPU(const int nthreads,
          const Dtype* bottom_data, const Dtype* label,
          Dtype* acc, Dtype* counts,
          const int num, const int dim, const int spatial_dim,
          const int num_labels, const int top_k,
          const bool has_ignore_label_, const int ignore_label_) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // nothing to be done.
    } else {
      // Fix: score read moved below the ignore check (see AccuracyForwardGPU).
      const Dtype prob_of_true_class =
          bottom_data[n * dim + label_value * spatial_dim + s];
      int num_better_predictions = -1;  // true_class also counts as "better"
      for (int k = 0; k < num_labels && num_better_predictions < top_k; k++) {
        num_better_predictions +=
            (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class);
      }
      acc[label_value * nthreads + index] += (num_better_predictions < top_k);
      counts[label_value * nthreads + index] = 1;
    }
  }
}

// Computes the overall accuracy into top[0], and (if top.size() > 1) the
// per-class accuracies into top[1]. Uses bottom[0]/bottom[1] diff buffers as
// scratch space to avoid extra GPU allocations, and zeroes bottom[0]'s diff
// afterwards so the scratch values cannot leak into backward (see #6202).
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bottom_label = bottom[1]->gpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything, we use it here to avoid having
  // to allocate new GPU memory to accumulate intermediate results.
  Dtype* acc_data = bottom[0]->mutable_gpu_diff();
  if (top.size() == 1) {
    // simple case - report only global accuracy.
    // Similarly, this memory is never used elsewhere, and thus we can use it
    // to avoid having to allocate additional GPU memory.
    Dtype* counts = bottom[1]->mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    AccuracyForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, bottom_label,
        acc_data, outer_num_, dim, inner_num_, num_labels, top_k_,
        has_ignore_label_, ignore_label_, counts);
    Dtype acc;
    caffe_gpu_asum(nthreads, acc_data, &acc);
    Dtype valid_count;
    caffe_gpu_asum(nthreads, counts, &valid_count);
    if (valid_count > 0) {
      top[0]->mutable_cpu_data()[0] = acc / valid_count;
    } else {
      // All positions ignored: report 0 rather than dividing by zero.
      top[0]->mutable_cpu_data()[0] = 0;
    }
  } else {
    // need to report per-class accuracy as well
    // allocate space for more detailed "counts"
    nums_buffer_.ReshapeLike(*bottom[0]);
    Dtype* counts = nums_buffer_.mutable_gpu_data();
    // The per-class kernel accumulates into scattered slots, so both scratch
    // buffers must start at zero.
    caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data);
    caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts);
    // NOLINT_NEXT_LINE(whitespace/operators)
    AccuracyForwardWithPerClassGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, bottom_label,
        acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_,
        has_ignore_label_, ignore_label_);
    // get the overall accuracy
    Dtype acc;
    caffe_gpu_asum(bottom[0]->count(), acc_data, &acc);
    Dtype valid_count;
    caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count);
    if (valid_count > 0) {
      top[0]->mutable_cpu_data()[0] = acc / valid_count;
    } else {
      top[0]->mutable_cpu_data()[0] = 0;
    }
    // get per-class accuracy: class l's hits/counts occupy the slice
    // [l*nthreads, (l+1)*nthreads) of the scratch buffers.
    Dtype* per_class_acc = top[1]->mutable_cpu_data();
    for (int l = 0; l < num_labels; l++) {
      caffe_gpu_asum(nthreads, acc_data + l * nthreads, per_class_acc + l);
      caffe_gpu_asum(nthreads, counts + l * nthreads, &valid_count);
      if (valid_count > 0) {
        per_class_acc[l] /= valid_count;
      } else {
        per_class_acc[l] = 0;
      }
    }
  }
  // Clear scratch memory to prevent interfering with backward (see #6202).
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}

// Accuracy has no gradient w.r.t. the data; propagating to the label input is
// not supported.
template <typename Dtype>
void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) { NOT_IMPLEMENTED; }
}

INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer);

}  // namespace caffe
ecfd77149d10e885df1f6c05fd0f4704c2719d0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Modifications Copyright (c) Microsoft. */ // The code below is mostly copied from Pytorch PersistentSoftmax.cuh #include "orttraining/training_ops/cuda/math/softmax_grad_impl.h" #include "core/providers/cuda/cudnn_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/math/softmax_common.h" #include "core/providers/cuda/math/softmax_warpwise_impl.cuh" #include "core/providers/cuda/shared_inc/accumulation_type.h" namespace onnxruntime { namespace cuda { template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax> __global__ void softmax_warp_backward(output_t* gradInput, const input_t* grad, const input_t* output, int element_count, int batch_count) { // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method // warp_softmax_backward_kernel. constexpr int next_power_of_two = 1 << log2_elements; constexpr int WARP_SIZE = (next_power_of_two < GPU_WARP_SIZE) ? next_power_of_two : GPU_WARP_SIZE; constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; #ifdef USE_ROCM constexpr int WARP_BATCH = 1; #else constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 
2 : 1; #endif int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; // batch_count might not be a multiple of WARP_BATCH. Check how // many batches have to computed within this WARP. int local_batches = batch_count - first_batch; if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; // there might be multiple batches per warp. compute the index within the batch int local_idx = threadIdx.x % WARP_SIZE; // the first element to process by the current thread int thread_offset = first_batch * element_count + local_idx; grad += thread_offset; output += thread_offset; gradInput += thread_offset; // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop, // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep // the nested loops. // This should have no impact on performance because the loops are unrolled anyway. // load data from global memory acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]; acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]; acc_t grad_output_reg[WARP_BATCH][WARP_ITERATIONS]; for (int i = 0; i < WARP_BATCH; ++i) { int batch_element_count = (i >= local_batches) ? 
0 : element_count; for (int it = 0; it < WARP_ITERATIONS; ++it) { int element_index = local_idx + it * WARP_SIZE; if (element_index < batch_element_count) { grad_reg[i][it] = grad[i * element_count + it * WARP_SIZE]; output_reg[i][it] = output[i * element_count + it * WARP_SIZE]; grad_output_reg[i][it] = grad_reg[i][it] * output_reg[i][it]; } else { grad_reg[i][it] = acc_t(0); output_reg[i][it] = acc_t(0); grad_output_reg[i][it] = acc_t(0); } } } acc_t sum[WARP_BATCH]; if (!is_log_softmax) { #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { sum[i] = grad_output_reg[i][0]; #pragma unroll for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += grad_output_reg[i][it]; } } warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum); } else { #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { sum[i] = grad_reg[i][0]; #pragma unroll for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += grad_reg[i][it]; } } warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum); } // store result #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { if (i >= local_batches) break; #pragma unroll for (int it = 0; it < WARP_ITERATIONS; ++it) { int element_index = local_idx + it * WARP_SIZE; if (element_index < element_count) { // compute gradients if (is_log_softmax) { gradInput[i * element_count + it * WARP_SIZE] = (grad_reg[i][it] - ::exp(output_reg[i][it]) * sum[i]); } else { gradInput[i * element_count + it * WARP_SIZE] = (grad_reg[i][it] - sum[i] ) * output_reg[i][it]; } } } } } // The function "softmax_warp_backward" saves intermediate results in float32 using registers to prevent recomputing, which can be beneficial for small shapes. // However, for larger shapes, the usage of a large register resource can lead to low CUDA warp occupancy and poor performance. // In contrast, "softmax_warp_backward_register_efficicent" recomputes intermediate results instead of saving them and also saves the inputs in float16 format to further reduce register usage. 
// TODO: If the dimension to do softmax is greater than 2048, saving the input into shared memory can further reduce register usage. template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax> __global__ void softmax_warp_backward_register_efficicent(output_t* gradInput, const input_t* grad, const input_t* output, int element_count, int batch_count) { // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method // warp_softmax_backward_kernel. constexpr int next_power_of_two = 1 << log2_elements; constexpr int WARP_SIZE = (next_power_of_two < GPU_WARP_SIZE) ? next_power_of_two : GPU_WARP_SIZE; constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; constexpr int WARP_BATCH = 1; int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; // batch_count might not be a multiple of WARP_BATCH. Check how // many batches have to computed within this WARP. int local_batches = batch_count - first_batch; if (local_batches > WARP_BATCH) local_batches = WARP_BATCH; // there might be multiple batches per warp. compute the index within the batch int local_idx = threadIdx.x % WARP_SIZE; // the first element to process by the current thread int thread_offset = first_batch * element_count + local_idx; grad += thread_offset; output += thread_offset; gradInput += thread_offset; // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop, // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep // the nested loops. // This should have no impact on performance because the loops are unrolled anyway. // load data from global memory input_t grad_reg[WARP_BATCH][WARP_ITERATIONS]; input_t output_reg[WARP_BATCH][WARP_ITERATIONS]; for (int i = 0; i < WARP_BATCH; ++i) { int batch_element_count = (i >= local_batches) ? 
0 : element_count; for (int it = 0; it < WARP_ITERATIONS; ++it) { int element_index = local_idx + it * WARP_SIZE; if (element_index < batch_element_count) { grad_reg[i][it] = grad[i * element_count + it * WARP_SIZE]; output_reg[i][it] = output[i * element_count + it * WARP_SIZE]; } else { grad_reg[i][it] = input_t(0); output_reg[i][it] = input_t(0); } } } acc_t sum[WARP_BATCH]; if (!is_log_softmax) { #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { sum[i] = (acc_t)(grad_reg[i][0]) * (acc_t)(output_reg[i][0]); #pragma unroll for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += (acc_t)(grad_reg[i][it]) * (acc_t)(output_reg[i][it]); } } warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum); } else { #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { sum[i] = (acc_t)grad_reg[i][0]; #pragma unroll for (int it = 1; it < WARP_ITERATIONS; ++it) { sum[i] += (acc_t)grad_reg[i][it]; } } warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum); } // store result #pragma unroll for (int i = 0; i < WARP_BATCH; ++i) { if (i >= local_batches) break; #pragma unroll for (int it = 0; it < WARP_ITERATIONS; ++it) { int element_index = local_idx + it * WARP_SIZE; if (element_index < element_count) { // compute gradients if (is_log_softmax) { gradInput[i * element_count + it * WARP_SIZE] = ((acc_t)(grad_reg[i][it]) - ::exp((acc_t)output_reg[i][it]) * sum[i]); } else { gradInput[i * element_count + it * WARP_SIZE] = ((acc_t)grad_reg[i][it] - sum[i] ) * (acc_t)output_reg[i][it]; } } } } } template <typename T> Status SoftmaxGradImpl(hipStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad, const T* output_grad, const T* softmax_output, int element_count, int batch_count, bool is_log_softmax) { if (element_count == 0) return Status::OK(); #ifdef USE_ROCM if (element_count <= 1024 && element_count * sizeof(T) <= 4096) { #else if (element_count <= 2048 && element_count * sizeof(T) <= 4096) { #endif typedef AccumulationType_t<T> AccT; int log2_elements = log2_ceil(element_count); 
const int next_power_of_two = 1 << log2_elements; // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. int warp_size = ::min(next_power_of_two, GPU_WARP_SIZE_HOST); // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. #ifdef USE_ROCM int batches_per_warp = 1; constexpr int threads_per_block = 256; #else int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; constexpr int threads_per_block = 128; #endif int warps_per_block = (threads_per_block / warp_size); int batches_per_block = warps_per_block * batches_per_warp; int blocks = (batch_count + batches_per_block - 1) / batches_per_block; dim3 threads(warp_size, warps_per_block, 1); // Launch code would be more elegant if C++ supported FOR CONSTEXPR constexpr int start_to_use_register_efficient_func = 11; switch (log2_elements) { #define LAUNCH_KERNEL(log2_elements_value, kernel_name) \ if (is_log_softmax) { \ hipLaunchKernelGGL(( kernel_name<T, T, AccT, log2_elements_value, true>) \ , dim3(blocks), dim3(threads), 0, stream, input_grad, output_grad, softmax_output, element_count, batch_count); \ } else { \ hipLaunchKernelGGL(( kernel_name<T, T, AccT, log2_elements_value, false>) \ , dim3(blocks), dim3(threads), 0, stream, input_grad, output_grad, softmax_output, element_count, batch_count); \ } #define CASE_LOG2_ELEMENTS(log2_elements_value) \ case log2_elements_value: { \ if (log2_elements_value < start_to_use_register_efficient_func) { \ LAUNCH_KERNEL(log2_elements_value, softmax_warp_backward); \ } else { \ LAUNCH_KERNEL(log2_elements_value, softmax_warp_backward_register_efficicent); \ } \ } break CASE_LOG2_ELEMENTS(0); // 1 CASE_LOG2_ELEMENTS(1); // 2 CASE_LOG2_ELEMENTS(2); // 4 CASE_LOG2_ELEMENTS(3); // 8 CASE_LOG2_ELEMENTS(4); // 16 CASE_LOG2_ELEMENTS(5); // 32 CASE_LOG2_ELEMENTS(6); // 64 CASE_LOG2_ELEMENTS(7); // 128 CASE_LOG2_ELEMENTS(8); // 256 CASE_LOG2_ELEMENTS(9); // 512 CASE_LOG2_ELEMENTS(10); // 1024 
CASE_LOG2_ELEMENTS(11); // 2048, start to use softmax_warp_backward_register_efficicent, decided by value of start_to_use_register_efficient_func #undef LAUNCH_KERNEL #undef CASE_LOG2_ELEMENTS } return Status::OK(); } const int64_t dims[]{batch_count, 1, 1, element_count}; // cudnn expects 4D shape in NCHW format const auto alpha = Consts<T>::One; const auto beta = Consts<T>::Zero; CudnnTensor input_tensor, output_tensor; ORT_RETURN_IF_ERROR(input_tensor.Set(dims, CudnnTensor::GetDataType<T>())); ORT_RETURN_IF_ERROR(output_tensor.Set(dims, CudnnTensor::GetDataType<T>())); return SoftmaxBackward(cudnn_handle, is_log_softmax, &alpha, input_tensor, softmax_output, output_grad, &beta, output_tensor, input_grad); } #define SPECIALIZED_SOFTMAX_GRAD_IMPL(type) \ template Status SoftmaxGradImpl<type>(hipStream_t stream, cudnnHandle_t cudnn_handle, type * input_grad, \ const type* output_grad, const type* softmax_output, int element_count, \ int batch_count, bool is_log_softmax); SPECIALIZED_SOFTMAX_GRAD_IMPL(float) SPECIALIZED_SOFTMAX_GRAD_IMPL(half) SPECIALIZED_SOFTMAX_GRAD_IMPL(BFloat16) #ifdef USE_ROCM SPECIALIZED_SOFTMAX_GRAD_IMPL(double) #endif #undef SPECIALIZED_SOFTMAX_GRAD_IMPL } }
ecfd77149d10e885df1f6c05fd0f4704c2719d0d.cu
/**
 * Copyright (c) 2016-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Modifications Copyright (c) Microsoft. */

// The code below is mostly copied from Pytorch PersistentSoftmax.cuh

#include "orttraining/training_ops/cuda/math/softmax_grad_impl.h"

#include "core/providers/cuda/cudnn_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/math/softmax_common.h"
#include "core/providers/cuda/math/softmax_warpwise_impl.cuh"
#include "core/providers/cuda/shared_inc/accumulation_type.h"

namespace onnxruntime {
namespace cuda {

// Warp-wise softmax / log-softmax backward: each warp handles WARP_BATCH rows
// of `element_count` elements, keeping all intermediates (grad, output,
// grad*output) in acc_t registers. Rows are distributed via
// (blockDim.y * blockIdx.x + threadIdx.y); lanes stride the row by WARP_SIZE.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_backward(output_t* gradInput, const input_t* grad, const input_t* output, int element_count, int batch_count) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method
  // warp_softmax_backward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE = (next_power_of_two < GPU_WARP_SIZE) ? next_power_of_two : GPU_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
#ifdef USE_ROCM
  constexpr int WARP_BATCH = 1;
#else
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
#endif
  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;

  // batch_count might not be a multiple of WARP_BATCH. Check how
  // many batches have to computed within this WARP.
  int local_batches = batch_count - first_batch;
  if (local_batches > WARP_BATCH)
    local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the batch
  int local_idx = threadIdx.x % WARP_SIZE;

  // the first element to process by the current thread
  int thread_offset = first_batch * element_count + local_idx;
  grad += thread_offset;
  output += thread_offset;
  gradInput += thread_offset;

  // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
  // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
  // the nested loops.
  // This should have no impact on performance because the loops are unrolled anyway.

  // load data from global memory; out-of-range slots are zero-filled so they
  // contribute nothing to the reduction below.
  acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
  acc_t output_reg[WARP_BATCH][WARP_ITERATIONS];
  acc_t grad_output_reg[WARP_BATCH][WARP_ITERATIONS];
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : element_count;
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < batch_element_count) {
        grad_reg[i][it] = grad[i * element_count + it * WARP_SIZE];
        output_reg[i][it] = output[i * element_count + it * WARP_SIZE];
        grad_output_reg[i][it] = grad_reg[i][it] * output_reg[i][it];
      } else {
        grad_reg[i][it] = acc_t(0);
        output_reg[i][it] = acc_t(0);
        grad_output_reg[i][it] = acc_t(0);
      }
    }
  }

  // Per-row reduction: sum of grad*output for softmax, sum of grad for
  // log-softmax; warp_reduce combines the lane partials across the warp.
  acc_t sum[WARP_BATCH];
  if (!is_log_softmax) {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      sum[i] = grad_output_reg[i][0];
#pragma unroll
      for (int it = 1; it < WARP_ITERATIONS; ++it) {
        sum[i] += grad_output_reg[i][it];
      }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
  } else {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      sum[i] = grad_reg[i][0];
#pragma unroll
      for (int it = 1; it < WARP_ITERATIONS; ++it) {
        sum[i] += grad_reg[i][it];
      }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
  }

  // store result
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches)
      break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
        // compute gradients
        if (is_log_softmax) {
          gradInput[i * element_count + it * WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]);
        } else {
          gradInput[i * element_count + it * WARP_SIZE] = (grad_reg[i][it] - sum[i]) * output_reg[i][it];
        }
      }
    }
  }
}

// The function "softmax_warp_backward" saves intermediate results in float32 using registers to prevent recomputing, which can be beneficial for small shapes.
// However, for larger shapes, the usage of a large register resource can lead to low CUDA warp occupancy and poor performance.
// In contrast, "softmax_warp_backward_register_efficicent" recomputes intermediate results instead of saving them and also saves the inputs in float16 format to further reduce register usage.
// TODO: If the dimension to do softmax is greater than 2048, saving the input into shared memory can further reduce register usage.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_backward_register_efficicent(output_t* gradInput, const input_t* grad, const input_t* output, int element_count, int batch_count) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method
  // warp_softmax_backward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE = (next_power_of_two < GPU_WARP_SIZE) ? next_power_of_two : GPU_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  // Always one row per warp here: fewer registers per thread than the
  // variant above.
  constexpr int WARP_BATCH = 1;

  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;

  // batch_count might not be a multiple of WARP_BATCH. Check how
  // many batches have to computed within this WARP.
  int local_batches = batch_count - first_batch;
  if (local_batches > WARP_BATCH)
    local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the batch
  int local_idx = threadIdx.x % WARP_SIZE;

  // the first element to process by the current thread
  int thread_offset = first_batch * element_count + local_idx;
  grad += thread_offset;
  output += thread_offset;
  gradInput += thread_offset;

  // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
  // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
  // the nested loops.
  // This should have no impact on performance because the loops are unrolled anyway.

  // load data from global memory, kept in the (narrower) input_t type; the
  // grad*output products are recomputed on the fly instead of cached.
  input_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
  input_t output_reg[WARP_BATCH][WARP_ITERATIONS];
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : element_count;
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < batch_element_count) {
        grad_reg[i][it] = grad[i * element_count + it * WARP_SIZE];
        output_reg[i][it] = output[i * element_count + it * WARP_SIZE];
      } else {
        grad_reg[i][it] = input_t(0);
        output_reg[i][it] = input_t(0);
      }
    }
  }

  // Per-row reduction, accumulated in acc_t for precision.
  acc_t sum[WARP_BATCH];
  if (!is_log_softmax) {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      sum[i] = (acc_t)(grad_reg[i][0]) * (acc_t)(output_reg[i][0]);
#pragma unroll
      for (int it = 1; it < WARP_ITERATIONS; ++it) {
        sum[i] += (acc_t)(grad_reg[i][it]) * (acc_t)(output_reg[i][it]);
      }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
  } else {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      sum[i] = (acc_t)grad_reg[i][0];
#pragma unroll
      for (int it = 1; it < WARP_ITERATIONS; ++it) {
        sum[i] += (acc_t)grad_reg[i][it];
      }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
  }

  // store result
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches)
      break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
        // compute gradients
        if (is_log_softmax) {
          gradInput[i * element_count + it * WARP_SIZE] = ((acc_t)(grad_reg[i][it]) - std::exp((acc_t)output_reg[i][it]) * sum[i]);
        } else {
          gradInput[i * element_count + it * WARP_SIZE] = ((acc_t)grad_reg[i][it] - sum[i]) * (acc_t)output_reg[i][it];
        }
      }
    }
  }
}

// Dispatcher: small rows go to the persistent warp-wise kernels (register
// variant above log2_elements >= 11, i.e. rows of 2048); anything larger
// falls back to the cuDNN softmax backward path.
template <typename T>
Status SoftmaxGradImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad, const T* output_grad,
                       const T* softmax_output, int element_count, int batch_count, bool is_log_softmax) {
  if (element_count == 0)
    return Status::OK();
#ifdef USE_ROCM
  if (element_count <= 1024 && element_count * sizeof(T) <= 4096) {
#else
  if (element_count <= 2048 && element_count * sizeof(T) <= 4096) {
#endif
    typedef AccumulationType_t<T> AccT;
    int log2_elements = log2_ceil(element_count);
    const int next_power_of_two = 1 << log2_elements;

    // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
    int warp_size = std::min(next_power_of_two, GPU_WARP_SIZE_HOST);

    // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
#ifdef USE_ROCM
    int batches_per_warp = 1;
    constexpr int threads_per_block = 256;
#else
    int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
    constexpr int threads_per_block = 128;
#endif

    int warps_per_block = (threads_per_block / warp_size);
    int batches_per_block = warps_per_block * batches_per_warp;
    int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
    dim3 threads(warp_size, warps_per_block, 1);
    // Launch code would be more elegant if C++ supported FOR CONSTEXPR
    constexpr int start_to_use_register_efficient_func = 11;
    switch (log2_elements) {
#define LAUNCH_KERNEL(log2_elements_value, kernel_name)                                                        \
  if (is_log_softmax) {                                                                                        \
    kernel_name<T, T, AccT, log2_elements_value, true>                                                         \
        <<<blocks, threads, 0, stream>>>(input_grad, output_grad, softmax_output, element_count, batch_count); \
  } else {                                                                                                     \
    kernel_name<T, T, AccT, log2_elements_value, false>                                                        \
        <<<blocks, threads, 0, stream>>>(input_grad, output_grad, softmax_output, element_count, batch_count); \
  }
#define CASE_LOG2_ELEMENTS(log2_elements_value)                                      \
  case log2_elements_value: {                                                        \
    if (log2_elements_value < start_to_use_register_efficient_func) {                \
      LAUNCH_KERNEL(log2_elements_value, softmax_warp_backward);                     \
    } else {                                                                         \
      LAUNCH_KERNEL(log2_elements_value, softmax_warp_backward_register_efficicent); \
    }                                                                                \
  } break
      CASE_LOG2_ELEMENTS(0);   // 1
      CASE_LOG2_ELEMENTS(1);   // 2
      CASE_LOG2_ELEMENTS(2);   // 4
      CASE_LOG2_ELEMENTS(3);   // 8
      CASE_LOG2_ELEMENTS(4);   // 16
      CASE_LOG2_ELEMENTS(5);   // 32
      CASE_LOG2_ELEMENTS(6);   // 64
      CASE_LOG2_ELEMENTS(7);   // 128
      CASE_LOG2_ELEMENTS(8);   // 256
      CASE_LOG2_ELEMENTS(9);   // 512
      CASE_LOG2_ELEMENTS(10);  // 1024
      CASE_LOG2_ELEMENTS(11);  // 2048, start to use softmax_warp_backward_register_efficicent, decided by value of start_to_use_register_efficient_func
#undef LAUNCH_KERNEL
#undef CASE_LOG2_ELEMENTS
    }
    return Status::OK();
  }
  // Fallback: cuDNN softmax backward on a 4D NCHW view of the data.
  const int64_t dims[]{batch_count, 1, 1, element_count};  // cudnn expects 4D shape in NCHW format
  const auto alpha = Consts<T>::One;
  const auto beta = Consts<T>::Zero;
  CudnnTensor input_tensor, output_tensor;
  ORT_RETURN_IF_ERROR(input_tensor.Set(dims, CudnnTensor::GetDataType<T>()));
  ORT_RETURN_IF_ERROR(output_tensor.Set(dims, CudnnTensor::GetDataType<T>()));
  return SoftmaxBackward(cudnn_handle, is_log_softmax, &alpha, input_tensor, softmax_output, output_grad, &beta,
                         output_tensor, input_grad);
}

// Explicit instantiations for the supported element types.
#define SPECIALIZED_SOFTMAX_GRAD_IMPL(type)                                                                 \
  template Status SoftmaxGradImpl<type>(cudaStream_t stream, cudnnHandle_t cudnn_handle, type * input_grad, \
                                        const type* output_grad, const type* softmax_output, int element_count, \
                                        int batch_count, bool is_log_softmax);

SPECIALIZED_SOFTMAX_GRAD_IMPL(float)
SPECIALIZED_SOFTMAX_GRAD_IMPL(half)
SPECIALIZED_SOFTMAX_GRAD_IMPL(BFloat16)
#ifdef USE_CUDA
SPECIALIZED_SOFTMAX_GRAD_IMPL(double)
#endif

#undef SPECIALIZED_SOFTMAX_GRAD_IMPL

}  // namespace cuda
}  // namespace onnxruntime
878e8d18e999e3f2a4c675aa9eb261a1011190c1.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <time.h>

#include "hip/hip_runtime.h"
// Fix: this file calls the hipBLAS API (hipblasCreate, hipblasSgemm,
// HIPBLAS_OP_T, ...), which is declared in hipblas.h. The hipify output
// included "rocblas.h", which declares the separate rocblas_* API and does
// not provide these symbols. The CUDA-only "device_launch_parameters.h"
// include was also dropped: no kernels are defined here, and the header does
// not exist on a ROCm toolchain.
#include "hipblas.h"

// GPU demo translation unit. Built by the device compiler; the comments that
// were originally here were lost to an encoding conversion, so the log
// messages below are reconstructed in English from the code itself.

// Trivial marker function proving this file was compiled by the device compiler.
void nvcc_test() {
  std::cout << "I have been processed by NVCC!" << std::endl;
}

// Print basic hardware properties of every visible GPU device.
void print_GPU_device_info() {
  int deviceCount;
  hipGetDeviceCount(&deviceCount);
  for (int i = 0; i < deviceCount; i++) {
    hipDeviceProp_t devProp;
    hipGetDeviceProperties(&devProp, i);
    std::cout << "GPU device " << i << ": " << devProp.name << std::endl;
    std::cout << "Total global memory: "
              << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
    std::cout << "Multiprocessor (SM) count: "
              << devProp.multiProcessorCount << std::endl;
    std::cout << "Max threads per SM: "
              << devProp.maxThreadsPerMultiProcessor << std::endl;
    std::cout << "Max warps per SM: "
              << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
    std::cout << "Shared memory per block: "
              << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
    std::cout << "Max threads per block: "
              << devProp.maxThreadsPerBlock << std::endl;
    std::cout << "32-bit registers per block: "
              << devProp.regsPerBlock << std::endl;
    std::cout << "======================================================" << std::endl;
  }
}

// GEMM smoke test: fills two random host matrices, multiplies their
// transposes on the GPU via hipblasSgemm (column-major, C is M x M), copies
// the result back and prints everything.
void testoperation() {
  srand(unsigned(time(NULL)));

  hipblasStatus_t status;
  const int N = 10, M = 5;

  // Host buffers: two N*M operands and the M*M result.
  float* h_A = (float*)malloc(N * M * sizeof(float));
  float* h_B = (float*)malloc(N * M * sizeof(float));
  float* h_C = (float*)malloc(M * M * sizeof(float));

  // Fill the operands with random values in [1, 10].
  for (int i = 0; i < N * M; i++) {
    h_A[i] = (float)(rand() % 10 + 1);
    h_B[i] = (float)(rand() % 10 + 1);
  }

  // Print the operands (A with rows of N values, B with rows of M values).
  std::cout << "Matrix A:" << std::endl;
  for (int i = 0; i < N * M; i++) {
    std::cout << h_A[i] << " ";
    if ((i + 1) % N == 0) std::cout << std::endl;
  }
  std::cout << std::endl;
  std::cout << "Matrix B:" << std::endl;
  for (int i = 0; i < N * M; i++) {
    std::cout << h_B[i] << " ";
    if ((i + 1) % M == 0) std::cout << std::endl;
  }
  std::cout << std::endl;

  // Initialize the BLAS library handle.
  hipblasHandle_t handle;
  status = hipblasCreate(&handle);
  if (status != HIPBLAS_STATUS_SUCCESS) {
    if (status == HIPBLAS_STATUS_NOT_INITIALIZED) {
      std::cout << "BLAS library initialization failed" << std::endl;
    }
    // Fix: the original returned here without releasing h_A/h_B/h_C,
    // leaking the three host buffers allocated above.
    free(h_A);
    free(h_B);
    free(h_C);
    return;
  }

  // Device buffers mirroring the host ones.
  float *d_A, *d_B, *d_C;
  hipMalloc((void**)&d_A, N * M * sizeof(float));
  hipMalloc((void**)&d_B, N * M * sizeof(float));
  hipMalloc((void**)&d_C, M * M * sizeof(float));

  // Upload both operands (stride 1 on both sides).
  hipblasSetVector(N * M, sizeof(float), h_A, 1, d_A, 1);
  hipblasSetVector(N * M, sizeof(float), h_B, 1, d_B, 1);

  hipDeviceSynchronize();

  float a = 1;  // alpha
  float b = 0;  // beta
  // C = alpha * op(A) * op(B) + beta * C with both operands transposed.
  // Column-major: op(A) is M x N (lda = N), op(B) is N x M (ldb = M),
  // so C is M x M (ldc = M).
  hipblasSgemm(
      handle,
      HIPBLAS_OP_T,  // transpose A
      HIPBLAS_OP_T,  // transpose B
      M,             // rows of op(A) and of C
      M,             // columns of op(B) and of C
      N,             // shared inner dimension
      &a,
      d_A, N,        // lda
      d_B, M,        // ldb
      &b,
      d_C, M);       // ldc

  hipDeviceSynchronize();

  // Download the M x M result.
  hipblasGetVector(M * M, sizeof(float), d_C, 1, h_C, 1);

  std::cout << "Result matrix C:" << std::endl;
  for (int i = 0; i < M * M; i++) {
    std::cout << h_C[i] << " ";
    if ((i + 1) % M == 0) std::cout << std::endl;
  }

  // Release all host and device resources.
  free(h_A);
  free(h_B);
  free(h_C);
  hipFree(d_A);
  hipFree(d_B);
  hipFree(d_C);
  hipblasDestroy(handle);
}
878e8d18e999e3f2a4c675aa9eb261a1011190c1.cu
#include <iostream> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cublas_v2.h"//矩阵运算库cuBLAS //本文件将调用GPU进行运算 //注意将本文件添加到 源文件中, 而不是资源文件 //本项目的属性可以通过 “cuda_debug_x64_PropertySheet”添加 // defining void nvcc_test(){ std::cout << "I have been processed by NVCC!" << std::endl; } //打印GPU设备信息 void print_GPU_device_info(){ int deviceCount; cudaGetDeviceCount(&deviceCount); for (int i = 0; i<deviceCount; i++){ cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl; std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl; std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl; std::cout << "每个SM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl; std::cout << "每个SM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl; std::cout << "每个Block的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl; std::cout << "每个Block的最大线程数:" << devProp.maxThreadsPerBlock << std::endl; std::cout << "每个Block中可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl; std::cout << "======================================================" << std::endl; } } //运算测试 void testoperation(){ srand(unsigned(time(NULL))); // 定义状态变量 cublasStatus_t status; const int N = 10, M = 5; // 在 "内存" 中为将要计算的矩阵开辟空间 float *h_A = (float*)malloc(N * M * sizeof(float)); float *h_B = (float*)malloc(N * M * sizeof(float)); // 在 "内存" 中为将要存放运算结果的矩阵开辟空间 float *h_C = (float*)malloc(M * M * sizeof(float)); // 为待运算矩阵的元素赋予 0-10 范围内的随机数 for (int i = 0; i < N * M; i++) { h_A[i] = (float)(rand() % 10 + 1); h_B[i] = (float)(rand() % 10 + 1); } // 打印待测试的矩阵 std::cout << "矩阵 A :" << std::endl; for (int i = 0; i < N * M; i++){ std::cout << h_A[i] << " "; if ((i + 1) % N == 0) std::cout << std::endl; } std::cout << std::endl; std::cout << "矩阵 B :" << std::endl; for (int i = 0; i < N * M; i++){ std::cout << h_B[i] << " "; if ((i + 1) % M == 0) std::cout 
<< std::endl; } std::cout << std::endl; // GPU 计算矩阵相乘 // 创建并初始化 CUBLAS 库对象 cublasHandle_t handle; status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS){ if (status == CUBLAS_STATUS_NOT_INITIALIZED){ std::cout << "CUBLAS 对象实例化出错" << std::endl; } return; } float *d_A, *d_B, *d_C; // 在 "显存" 中为将要计算的矩阵开辟空间 cudaMalloc( (void**)&d_A, // 指向开辟的空间的指针 N*M * sizeof(float) // 需要开辟空间的字节数 ); cudaMalloc((void**)&d_B, N * M * sizeof(float)); // 在 "显存" 中为将要存放运算结果的矩阵开辟空间 cudaMalloc((void**)&d_C, M * M * sizeof(float)); // 将矩阵数据传递进 显存 中已经开辟好了的空间 cublasSetVector( N * M, // 要存入显存的元素个数 sizeof(float), // 每个元素大小 h_A, // 主机端起始地址 1, // 连续元素之间的存储间隔 d_A, // GPU 端起始地址 1 // 连续元素之间的存储间隔 ); cublasSetVector(N * M, sizeof(float), h_B, 1, d_B, 1); // 同步函数 cudaThreadSynchronize(); // 传递进矩阵相乘函数中的参数,具体含义请参考函数手册。 float a = 1; float b = 0; // 矩阵相乘。该函数必然将数组解析成列优先数组 cublasSgemm( handle, // blas 库对象 CUBLAS_OP_T, // 矩阵 A 属性参数 CUBLAS_OP_T, // 矩阵 B 属性参数 M, // A, C 的行数 M, // B, C 的列数 N, // A 的列数和 B 的行数 &a, // 运算式的 α 值 d_A, // A 在显存中的地址 N, // lda d_B, // B 在显存中的地址 M, // ldb &b, // 运算式的 β 值 d_C, // C 在显存中的地址(结果矩阵) M // ldc ); // 同步函数 cudaThreadSynchronize(); // 从 显存 中取出运算结果至 内存中去 cublasGetVector( M*M, // 要取出元素的个数 sizeof(float), // 每个元素大小 d_C, // GPU 端起始地址 1, // 连续元素之间的存储间隔 h_C, // 主机端起始地址 1 // 连续元素之间的存储间隔 ); // 打印运算结果 std::cout << "计算结果的转置 ( (A*B)的转置 ):" << std::endl; for (int i = 0; i<M*M; i++){ std::cout << h_C[i] << " "; if ((i + 1) % M == 0) std::cout << std::endl; } // 清理掉使用过的内存 free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // 释放 CUBLAS 库对象 cublasDestroy(handle); }
dd492634d79a7713ac041740b9405ac1d07bd8c9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "find_maximum_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array = NULL; hipMalloc(&array, XSIZE*YSIZE); float *max = NULL; hipMalloc(&max, XSIZE*YSIZE); int *mutex = NULL; hipMalloc(&mutex, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( find_maximum_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,max,mutex,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( find_maximum_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,max,mutex,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( find_maximum_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,max,mutex,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - 
start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
dd492634d79a7713ac041740b9405ac1d07bd8c9.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "find_maximum_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array = NULL; cudaMalloc(&array, XSIZE*YSIZE); float *max = NULL; cudaMalloc(&max, XSIZE*YSIZE); int *mutex = NULL; cudaMalloc(&mutex, XSIZE*YSIZE); unsigned int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); find_maximum_kernel<<<gridBlock,threadBlock>>>(array,max,mutex,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { find_maximum_kernel<<<gridBlock,threadBlock>>>(array,max,mutex,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { find_maximum_kernel<<<gridBlock,threadBlock>>>(array,max,mutex,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ecab5855811ac9a09a3f16c3513bb4c0a8d33a2f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/batch_fc_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace operators { using framework::Tensor; const int CUDA_NUM_THREADS = 1024; static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void add_bias_kernel( T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * ins_num * out_dim) { int block_len = ins_num * out_dim; int slot_index = idx / block_len; int out_dim_index = (idx % block_len) % out_dim; T temp = data[idx] + bias[slot_index * out_dim + out_dim_index]; data[idx] = temp; } } template <typename T> void add_bias(gpuStream_t stream, T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { hipLaunchKernelGGL(( add_bias_kernel), dim3(GET_BLOCKS(slot_pairs_num * ins_num * out_dim)), dim3(CUDA_NUM_THREADS), 0, stream, data, slot_pairs_num, ins_num, out_dim, bias); } template <typename T> __global__ void add_bias_grad_kernel(const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { 
CUDA_KERNEL_LOOP(idx, slot_pairs_num * out_dim) { int row = idx / out_dim; int col = idx % out_dim; T temp = static_cast<T>(0); for (int i = 0; i < ins_num; ++i) { int select_indx = ((row + 1) * i + 1) * col; temp += dout_data[select_indx]; } db_data[idx] += temp; } } template <typename T> void add_bias_grad(gpuStream_t stream, const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { hipLaunchKernelGGL(( add_bias_grad_kernel), dim3(GET_BLOCKS(slot_pairs_num * out_dim)), dim3(CUDA_NUM_THREADS), 0, stream, dout_data, slot_pairs_num, ins_num, out_dim, db_data); } template <typename DeviceContext, typename T> class BatchFCCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { // X.dim = slot_pairs_num * ins_num * in_dim // W.dim = slot_pairs_num * in_dim * out_dim // b.dim = slot_pairs_num * out_dim // output.dim = slot_pairs_num * ins_num * out_dim auto* input = ctx.Input<framework::LoDTensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* bias = ctx.Input<Tensor>("Bias"); auto* output = ctx.Output<framework::LoDTensor>("Out"); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; // get data ptr const T* in_data = input->data<T>(); const T* w_data = w->data<T>(); const T* bias_data = bias->data<T>(); output->Resize({slot_pairs_num, ins_num, out_dim}); T* out_data = output->mutable_data<T>(ctx.GetPlace()); // initialize auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *ctx.template device_context<phi::GPUContext>().eigen_device(); out_eigen.device(place) = out_eigen.constant(static_cast<T>(0)); CBLAS_TRANSPOSE transA = CblasNoTrans; CBLAS_TRANSPOSE transB = CblasNoTrans; T alpha = 1; T beta = 0; int64_t strideA = ins_num * in_dim; int64_t strideB = in_dim * 
out_dim; auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx); blas.BatchedGEMM(transA, transB, ins_num, out_dim, in_dim, alpha, in_data, w_data, beta, out_data, slot_pairs_num, strideA, strideB); add_bias<T>(ctx.cuda_device_context().stream(), out_data, slot_pairs_num, ins_num, out_dim, bias_data); } }; template <typename DeviceContext, typename T> class BatchFCGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("Input")); auto* dw = ctx.Output<Tensor>(framework::GradVarName("W")); auto* db = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *ctx.template device_context<phi::GPUContext>().eigen_device(); // initialize dx->mutable_data<T>(ctx.GetPlace()); auto dx_eigen = framework::EigenVector<T>::Flatten(*dx); dx_eigen.device(place) = dx_eigen.constant(static_cast<T>(0)); dw->mutable_data<T>(ctx.GetPlace()); auto dw_eigen = framework::EigenVector<T>::Flatten(*dw); dw_eigen.device(place) = dw_eigen.constant(static_cast<T>(0)); // get data ptr const T* x_data = input->data<T>(); const T* w_data = w->data<T>(); const T* dout_data = dout->data<T>(); T* dx_data = dx->data<T>(); T* dw_data = dw->data<T>(); db->mutable_data<T>(ctx.GetPlace()); auto db_eigen = framework::EigenVector<T>::Flatten(*db); db_eigen.device(place) = db_eigen.constant(static_cast<T>(0)); T* db_data = db->data<T>(); add_bias_grad<T>(ctx.cuda_device_context().stream(), dout_data, slot_pairs_num, ins_num, out_dim, db_data); auto blas = phi::funcs::GetBlas<phi::GPUContext, 
T>(dev_ctx); T alpha = 1; T beta = 0; // dx = dout_data * y^T blas.BatchedGEMM(CblasNoTrans, CblasTrans, ins_num, in_dim, out_dim, alpha, dout_data, w_data, beta, dx_data, slot_pairs_num, ins_num * out_dim, out_dim * in_dim); // dy = x^T * dout_data blas.BatchedGEMM(CblasTrans, CblasNoTrans, in_dim, out_dim, ins_num, alpha, x_data, dout_data, beta, dw_data, slot_pairs_num, in_dim * ins_num, ins_num * out_dim); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using GPUCtx = phi::GPUContext; REGISTER_OP_CUDA_KERNEL(batch_fc, ops::BatchFCCUDAKernel<GPUCtx, float>, ops::BatchFCCUDAKernel<GPUCtx, double>); REGISTER_OP_CUDA_KERNEL(batch_fc_grad, ops::BatchFCGradOpCUDAKernel<GPUCtx, float>, ops::BatchFCGradOpCUDAKernel<GPUCtx, double>);
ecab5855811ac9a09a3f16c3513bb4c0a8d33a2f.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/batch_fc_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace operators { using framework::Tensor; const int CUDA_NUM_THREADS = 1024; static inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void add_bias_kernel( T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * ins_num * out_dim) { int block_len = ins_num * out_dim; int slot_index = idx / block_len; int out_dim_index = (idx % block_len) % out_dim; T temp = data[idx] + bias[slot_index * out_dim + out_dim_index]; data[idx] = temp; } } template <typename T> void add_bias(gpuStream_t stream, T* data, int slot_pairs_num, int ins_num, int out_dim, const T* bias) { add_bias_kernel<<<GET_BLOCKS(slot_pairs_num * ins_num * out_dim), CUDA_NUM_THREADS, 0, stream>>>(data, slot_pairs_num, ins_num, out_dim, bias); } template <typename T> __global__ void add_bias_grad_kernel(const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { CUDA_KERNEL_LOOP(idx, slot_pairs_num * out_dim) { int row = idx / out_dim; int col = idx % out_dim; T temp = static_cast<T>(0); for 
(int i = 0; i < ins_num; ++i) { int select_indx = ((row + 1) * i + 1) * col; temp += dout_data[select_indx]; } db_data[idx] += temp; } } template <typename T> void add_bias_grad(gpuStream_t stream, const T* dout_data, int slot_pairs_num, int ins_num, int out_dim, T* db_data) { add_bias_grad_kernel<<<GET_BLOCKS(slot_pairs_num * out_dim), CUDA_NUM_THREADS, 0, stream>>>( dout_data, slot_pairs_num, ins_num, out_dim, db_data); } template <typename DeviceContext, typename T> class BatchFCCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { // X.dim = slot_pairs_num * ins_num * in_dim // W.dim = slot_pairs_num * in_dim * out_dim // b.dim = slot_pairs_num * out_dim // output.dim = slot_pairs_num * ins_num * out_dim auto* input = ctx.Input<framework::LoDTensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* bias = ctx.Input<Tensor>("Bias"); auto* output = ctx.Output<framework::LoDTensor>("Out"); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; // get data ptr const T* in_data = input->data<T>(); const T* w_data = w->data<T>(); const T* bias_data = bias->data<T>(); output->Resize({slot_pairs_num, ins_num, out_dim}); T* out_data = output->mutable_data<T>(ctx.GetPlace()); // initialize auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *ctx.template device_context<phi::GPUContext>().eigen_device(); out_eigen.device(place) = out_eigen.constant(static_cast<T>(0)); CBLAS_TRANSPOSE transA = CblasNoTrans; CBLAS_TRANSPOSE transB = CblasNoTrans; T alpha = 1; T beta = 0; int64_t strideA = ins_num * in_dim; int64_t strideB = in_dim * out_dim; auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx); blas.BatchedGEMM(transA, transB, ins_num, out_dim, in_dim, alpha, in_data, w_data, beta, 
out_data, slot_pairs_num, strideA, strideB); add_bias<T>(ctx.cuda_device_context().stream(), out_data, slot_pairs_num, ins_num, out_dim, bias_data); } }; template <typename DeviceContext, typename T> class BatchFCGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("Input"); auto* w = ctx.Input<Tensor>("W"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("Input")); auto* dw = ctx.Output<Tensor>(framework::GradVarName("W")); auto* db = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto input_dims = input->dims(); auto w_dims = w->dims(); auto slot_pairs_num = input_dims[0]; auto ins_num = input_dims[1]; auto in_dim = input_dims[2]; auto out_dim = w_dims[2]; auto& dev_ctx = ctx.template device_context<phi::GPUContext>(); auto& place = *ctx.template device_context<phi::GPUContext>().eigen_device(); // initialize dx->mutable_data<T>(ctx.GetPlace()); auto dx_eigen = framework::EigenVector<T>::Flatten(*dx); dx_eigen.device(place) = dx_eigen.constant(static_cast<T>(0)); dw->mutable_data<T>(ctx.GetPlace()); auto dw_eigen = framework::EigenVector<T>::Flatten(*dw); dw_eigen.device(place) = dw_eigen.constant(static_cast<T>(0)); // get data ptr const T* x_data = input->data<T>(); const T* w_data = w->data<T>(); const T* dout_data = dout->data<T>(); T* dx_data = dx->data<T>(); T* dw_data = dw->data<T>(); db->mutable_data<T>(ctx.GetPlace()); auto db_eigen = framework::EigenVector<T>::Flatten(*db); db_eigen.device(place) = db_eigen.constant(static_cast<T>(0)); T* db_data = db->data<T>(); add_bias_grad<T>(ctx.cuda_device_context().stream(), dout_data, slot_pairs_num, ins_num, out_dim, db_data); auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx); T alpha = 1; T beta = 0; // dx = dout_data * y^T blas.BatchedGEMM(CblasNoTrans, CblasTrans, ins_num, in_dim, out_dim, alpha, dout_data, w_data, 
beta, dx_data, slot_pairs_num, ins_num * out_dim, out_dim * in_dim); // dy = x^T * dout_data blas.BatchedGEMM(CblasTrans, CblasNoTrans, in_dim, out_dim, ins_num, alpha, x_data, dout_data, beta, dw_data, slot_pairs_num, in_dim * ins_num, ins_num * out_dim); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using GPUCtx = phi::GPUContext; REGISTER_OP_CUDA_KERNEL(batch_fc, ops::BatchFCCUDAKernel<GPUCtx, float>, ops::BatchFCCUDAKernel<GPUCtx, double>); REGISTER_OP_CUDA_KERNEL(batch_fc_grad, ops::BatchFCGradOpCUDAKernel<GPUCtx, float>, ops::BatchFCGradOpCUDAKernel<GPUCtx, double>);
a34f0be4833e3414517645907552f4688d4225e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tensor/gpu_sparse_tensor.h" #include "tensor/cpu_sparse_tensor.h" #include "tensor/t_data.h" #include "tensor/gpu_dense_tensor.h" #include "tensor/gpu_reduce_kernel.h" #include "util/mem_holder.h" #include <cstring> #include <cassert> namespace gnn { template<typename Dtype> TensorTemplate<GPU, SPARSE, Dtype>::TensorTemplate() : data(nullptr) { } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::Reshape(std::vector<size_t> l) { ASSERT(l.size() == 2, "only support sparse matrix"); this->shape.Reshape(l); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ResizeSp(int newNNZ, int newNPtr) { if (this->data == nullptr) this->data = std::make_shared< SparseData<GPU, Dtype> >(); if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = ::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = ::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SparseData<GPU, Dtype> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template<typename Dtype> MatType TensorTemplate<GPU, SPARSE, Dtype>::GetMatType() { return MatType::sparse; } template<typename Dtype> MatMode TensorTemplate<GPU, SPARSE, Dtype>::GetMatMode() { return MatMode::gpu; } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::CopyFrom(SpTensor<CPU, Dtype>& src) { this->shape = src.shape; ResizeSp(src.data->nnz, src.data->len_ptr); hipMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyHostToDevice); hipMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyHostToDevice); hipMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, hipMemcpyHostToDevice); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::CopyFrom(SpTensor<GPU, Dtype>& src) { this->shape = src.shape; ResizeSp(src.data->nnz, src.data->len_ptr); 
hipMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyDeviceToDevice); hipMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyDeviceToDevice); hipMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, hipMemcpyDeviceToDevice); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ShallowCopy(SpTensor<GPU, Dtype>& src) { this->shape = src.shape; this->data = src.data; } template<typename dstDtype, typename srcDtype> __global__ void SparseMatColReduceKernel(dstDtype* dst, int* row_ptr, int* col_idx, srcDtype *val) { __shared__ dstDtype buffer[REDUCE_THREADS]; int i_start = row_ptr[blockIdx.x] + threadIdx.x; int i_end = row_ptr[blockIdx.x + 1]; int i_step = blockDim.x; if (i_start < i_end) buffer[threadIdx.x] = i_start; for (int i = i_start + i_step; i < i_end; i += i_step) { if (val[i] > val[buffer[threadIdx.x]]) buffer[threadIdx.x] = i; } __syncthreads(); int shift; for (int i = REDUCE_THREAD_BITS - 1; i >= 0; --i) { shift = 1 << i; if (threadIdx.x < shift && threadIdx.x + shift < row_ptr[blockIdx.x + 1] - row_ptr[blockIdx.x]) { if (val[buffer[threadIdx.x]] < buffer[threadIdx.x + shift]) buffer[threadIdx.x] = buffer[threadIdx.x + shift]; } __syncthreads(); } if (threadIdx.x == 0) dst[blockIdx.x] = col_idx[buffer[0]]; } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ArgMax(DTensor<GPU, int>& dst, uint axis) { ASSERT(axis == 0, "not supported for axis > 0 in GPU Sparse Tensor"); dst.Reshape({this->shape[0]}); dim3 blocks(this->shape[0]); dim3 threads(REDUCE_THREADS); hipLaunchKernelGGL(( SparseMatColReduceKernel), dim3(blocks), dim3(threads), 0, cudaStreamPerThread, dst.data->ptr, data->row_ptr, data->col_idx, data->val); } template class TensorTemplate<GPU, SPARSE, float>; template class TensorTemplate<GPU, SPARSE, double>; TensorTemplate<GPU, SPARSE, int>::TensorTemplate() : data(nullptr) { } void TensorTemplate<GPU, SPARSE, int>::Reshape(std::vector<size_t> l) { } 
MatType TensorTemplate<GPU, SPARSE, int>::GetMatType() { return MatType::sparse; } MatMode TensorTemplate<GPU, SPARSE, int>::GetMatMode() { return MatMode::gpu; } void TensorTemplate<GPU, SPARSE, int>::ShallowCopy(SpTensor<GPU, int>& src) { this->shape = src.shape; this->data = src.data; } void TensorTemplate<GPU, SPARSE, int>::ResizeSp(int newNNZ, int newNPtr) { if (this->data == nullptr) this->data = std::make_shared< SparseData<GPU, int> >(); if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = ::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = ::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SparseData<GPU, int> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template class TensorTemplate<GPU, SPARSE, int>; }
a34f0be4833e3414517645907552f4688d4225e8.cu
#include "tensor/gpu_sparse_tensor.h" #include "tensor/cpu_sparse_tensor.h" #include "tensor/t_data.h" #include "tensor/gpu_dense_tensor.h" #include "tensor/gpu_reduce_kernel.h" #include "util/mem_holder.h" #include <cstring> #include <cassert> namespace gnn { template<typename Dtype> TensorTemplate<GPU, SPARSE, Dtype>::TensorTemplate() : data(nullptr) { } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::Reshape(std::vector<size_t> l) { ASSERT(l.size() == 2, "only support sparse matrix"); this->shape.Reshape(l); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ResizeSp(int newNNZ, int newNPtr) { if (this->data == nullptr) this->data = std::make_shared< SparseData<GPU, Dtype> >(); if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = std::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = std::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SparseData<GPU, Dtype> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template<typename Dtype> MatType TensorTemplate<GPU, SPARSE, Dtype>::GetMatType() { return MatType::sparse; } template<typename Dtype> MatMode TensorTemplate<GPU, SPARSE, Dtype>::GetMatMode() { return MatMode::gpu; } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::CopyFrom(SpTensor<CPU, Dtype>& src) { this->shape = src.shape; ResizeSp(src.data->nnz, src.data->len_ptr); cudaMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, cudaMemcpyHostToDevice); cudaMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyHostToDevice); cudaMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyHostToDevice); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::CopyFrom(SpTensor<GPU, Dtype>& src) { this->shape = src.shape; ResizeSp(src.data->nnz, src.data->len_ptr); cudaMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, 
cudaMemcpyDeviceToDevice); cudaMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyDeviceToDevice); cudaMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyDeviceToDevice); } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ShallowCopy(SpTensor<GPU, Dtype>& src) { this->shape = src.shape; this->data = src.data; } template<typename dstDtype, typename srcDtype> __global__ void SparseMatColReduceKernel(dstDtype* dst, int* row_ptr, int* col_idx, srcDtype *val) { __shared__ dstDtype buffer[REDUCE_THREADS]; int i_start = row_ptr[blockIdx.x] + threadIdx.x; int i_end = row_ptr[blockIdx.x + 1]; int i_step = blockDim.x; if (i_start < i_end) buffer[threadIdx.x] = i_start; for (int i = i_start + i_step; i < i_end; i += i_step) { if (val[i] > val[buffer[threadIdx.x]]) buffer[threadIdx.x] = i; } __syncthreads(); int shift; for (int i = REDUCE_THREAD_BITS - 1; i >= 0; --i) { shift = 1 << i; if (threadIdx.x < shift && threadIdx.x + shift < row_ptr[blockIdx.x + 1] - row_ptr[blockIdx.x]) { if (val[buffer[threadIdx.x]] < buffer[threadIdx.x + shift]) buffer[threadIdx.x] = buffer[threadIdx.x + shift]; } __syncthreads(); } if (threadIdx.x == 0) dst[blockIdx.x] = col_idx[buffer[0]]; } template<typename Dtype> void TensorTemplate<GPU, SPARSE, Dtype>::ArgMax(DTensor<GPU, int>& dst, uint axis) { ASSERT(axis == 0, "not supported for axis > 0 in GPU Sparse Tensor"); dst.Reshape({this->shape[0]}); dim3 blocks(this->shape[0]); dim3 threads(REDUCE_THREADS); SparseMatColReduceKernel<<<blocks, threads, 0, cudaStreamPerThread>>> (dst.data->ptr, data->row_ptr, data->col_idx, data->val); } template class TensorTemplate<GPU, SPARSE, float>; template class TensorTemplate<GPU, SPARSE, double>; TensorTemplate<GPU, SPARSE, int>::TensorTemplate() : data(nullptr) { } void TensorTemplate<GPU, SPARSE, int>::Reshape(std::vector<size_t> l) { } MatType TensorTemplate<GPU, SPARSE, int>::GetMatType() { return MatType::sparse; } MatMode 
TensorTemplate<GPU, SPARSE, int>::GetMatMode() { return MatMode::gpu; } void TensorTemplate<GPU, SPARSE, int>::ShallowCopy(SpTensor<GPU, int>& src) { this->shape = src.shape; this->data = src.data; } void TensorTemplate<GPU, SPARSE, int>::ResizeSp(int newNNZ, int newNPtr) { if (this->data == nullptr) this->data = std::make_shared< SparseData<GPU, int> >(); if (newNNZ > data->nzCap || newNPtr > data->ptrCap) { if (newNNZ > data->nzCap) data->nzCap = std::max(newNNZ, data->nzCap * 2); if (newNPtr > data->ptrCap) data->ptrCap = std::max(newNPtr, data->ptrCap * 2); data = std::make_shared< SparseData<GPU, int> >(data->nzCap, data->ptrCap); } data->nnz = newNNZ; data->len_ptr = newNPtr; } template class TensorTemplate<GPU, SPARSE, int>; }
c0f3b42e861d12b5a82126e0028c56181b352099.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) #define Mask_width 5 #define Mask_radius Mask_width / 2 #define TILE_WIDTH 16 #define w (TILE_WIDTH + Mask_width - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) //@@ INSERT CODE HERE int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ INSERT CODE HERE wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ INSERT CODE HERE wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE hipLaunchKernelGGL(( convolution), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, 
deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ INSERT CODE HERE hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); //@@ Insert code here free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
c0f3b42e861d12b5a82126e0028c56181b352099.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) #define Mask_width 5 #define Mask_radius Mask_width / 2 #define TILE_WIDTH 16 #define w (TILE_WIDTH + Mask_width - 1) #define clamp(x) (min(max((x), 0.0), 1.0)) //@@ INSERT CODE HERE int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); //@@ INSERT CODE HERE wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); //@@ INSERT CODE HERE wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE convolution<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); wbTime_stop(Compute, "Doing the 
computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); //@@ INSERT CODE HERE cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); //@@ Insert code here free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
654598e0ee9c1b395074d9223ee2352f1ca1bf67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hertz_constants.h" #include "hertz_cudaneighlist.h" #include "pair_interaction.h" #include "framework.h" #ifdef TRACE #warning TRACE enabled: timing will not be accurate #include "cuPrintf.hip" #endif #ifndef MAX_GRID_DIM #error You need to #define MAX_GRID_DIM (see Makefile.config) #endif __device__ int get_gid() { return threadIdx.x + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.x * gridDim.x); } // -------------------------------------------------------------------------- // UNPACK PER-PARTICLE DATA // -------------------------------------------------------------------------- __global__ void unpack_ro_data( int K, int *valid, int *dati, int *datj, double *radius, double *radiusi, double *radiusj, double *mass, double *massi, double *massj, int *type, int *typei, int *typej ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; radiusi[gid] = radius[i]; radiusj[gid] = radius[j]; massi[gid] = mass[i]; massj[gid] = mass[j]; typei[gid] = type[i]; typej[gid] = type[j]; } } __global__ void unpack_reload_data( int K, int *valid, int *dati, int *datj, double *x, double *xi, double *xj, double *v, double *vi, double *vj, double *omega, double *omegai, double *omegaj ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; xi[(gid*3)+0] = x[(i*3)+0]; xj[(gid*3)+0] = x[(j*3)+0]; xi[(gid*3)+1] = x[(i*3)+1]; xj[(gid*3)+1] = x[(j*3)+1]; xi[(gid*3)+2] = x[(i*3)+2]; xj[(gid*3)+2] = x[(j*3)+2]; vi[(gid*3)+0] = v[(i*3)+0]; vj[(gid*3)+0] = v[(j*3)+0]; vi[(gid*3)+1] = v[(i*3)+1]; vj[(gid*3)+1] = v[(j*3)+1]; vi[(gid*3)+2] = v[(i*3)+2]; vj[(gid*3)+2] = v[(j*3)+2]; omegai[(gid*3)+0] = omega[(i*3)+0]; omegaj[(gid*3)+0] = omega[(j*3)+0]; omegai[(gid*3)+1] = omega[(i*3)+1]; omegaj[(gid*3)+1] = omega[(j*3)+1]; omegai[(gid*3)+2] = omega[(i*3)+2]; omegaj[(gid*3)+2] = omega[(j*3)+2]; } } __global__ void compute( 
//inputs int K, int *valid, #ifdef TRACE int *dati, int *datj, #endif double *xi, double *xj, double *vi, double *vj, double *omegai, double *omegaj, double *radiusi, double *radiusj, double *massi, double *massj, int *typei, int *typej, //inouts double *fdelta, double *tdeltai, double *tdeltaj, double *shear ) { int gid = get_gid(); if (gid < K && valid[gid]) { pair_interaction( #ifdef TRACE dati[gid], datj[gid], #endif &xi[gid*3], &xj[gid*3], &vi[gid*3], &vj[gid*3], &omegai[gid*3], &omegaj[gid*3], radiusi[gid], radiusj[gid], massi[gid], massj[gid], typei[gid], typej[gid], &shear[gid*3], &fdelta[gid*3], /*fdeltaj is*/NULL, &tdeltai[gid*3], &tdeltaj[gid*3] ); } } __global__ void collect( //inputs int N, double *fdelta, double *tdeltai, double *tdeltaj, int *off, int *len, #if HALFNL int *tad, int *ffo, int *nel, #endif //inouts double *force, double *torque ) { int gid = get_gid(); double fsum[3] = {0,0,0}; double tsum[3] = {0,0,0}; if (gid < N) { int offset = off[gid]; for (int k=0; k<len[gid]; k++) { int idx = offset+k; fsum[0] += fdelta[(idx*3)+0]; fsum[1] += fdelta[(idx*3)+1]; fsum[2] += fdelta[(idx*3)+2]; tsum[0] += tdeltai[(idx*3)+0]; tsum[1] += tdeltai[(idx*3)+1]; tsum[2] += tdeltai[(idx*3)+2]; } #if HALFNL offset = ffo[gid]; for (int k=0; k<nel[gid]; k++) { int idx = tad[offset+k]; fsum[0] -= fdelta[(idx*3)+0]; fsum[1] -= fdelta[(idx*3)+1]; fsum[2] -= fdelta[(idx*3)+2]; tsum[0] += tdeltaj[(idx*3)+0]; tsum[1] += tdeltaj[(idx*3)+1]; tsum[2] += tdeltaj[(idx*3)+2]; } #endif force[(gid*3)] += fsum[0]; force[(gid*3)+1] += fsum[1]; force[(gid*3)+2] += fsum[2]; torque[(gid*3)] += tsum[0]; torque[(gid*3)+1] += tsum[1]; torque[(gid*3)+2] += tsum[2]; } } using namespace std; // DEVICE STRUCTURES // INPUTS // packed // unpacked(i) // unpacked(j) double *d_x; double *d_xi; double *d_xj; // ] reload double *d_v; double *d_vi; double *d_vj; // ] double *d_omega; double *d_omegai; double *d_omegaj; // ] double *d_radius; double *d_radiusi; double *d_radiusj; // ] ro double 
*d_mass; double *d_massi; double *d_massj; // ] int *d_type; int *d_typei; int *d_typej; // ] // OUTPUTS // packed // unpacked(i) // unpacked(j) double *d_force; double *d_fdelta; double *d_torque; double *d_tdeltai; double *d_tdeltaj; // d_shear in d_nl void no_cuda_error(const char *errmsg) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("[ERROR] %s\n", errmsg); printf("[ERROR] %d: %s\n", err, hipGetErrorString(err)); size_t free; size_t total; if (hipMemGetInfo(&free, &total) == hipSuccess) { printf("[ERROR] mem free=%zubytes total=%zubytes\n", free, total); } exit(1); } } #define NLEN(type,arity) (nparticles*arity*sizeof(type)) #define KLEN(type,arity) (nneighbors*arity*sizeof(type)) void init_dev_structures(int nparticles, int nneighbors) { //packed hipMalloc((void **)&d_x, NLEN(double,3)); hipMalloc((void **)&d_v, NLEN(double,3)); hipMalloc((void **)&d_omega, NLEN(double,3)); hipMalloc((void **)&d_radius, NLEN(double,1)); hipMalloc((void **)&d_mass, NLEN(double,1)); hipMalloc((void **)&d_type, NLEN(int, 1)); //unpacked(i) hipMalloc((void **)&d_xi, KLEN(double,3)); hipMalloc((void **)&d_vi, KLEN(double,3)); hipMalloc((void **)&d_omegai, KLEN(double,3)); hipMalloc((void **)&d_radiusi, KLEN(double,1)); hipMalloc((void **)&d_massi, KLEN(double,1)); hipMalloc((void **)&d_typei, KLEN(int ,1)); //unpacked(j) hipMalloc((void **)&d_xj, KLEN(double,3)); hipMalloc((void **)&d_vj, KLEN(double,3)); hipMalloc((void **)&d_omegaj, KLEN(double,3)); hipMalloc((void **)&d_radiusj, KLEN(double,1)); hipMalloc((void **)&d_massj, KLEN(double,1)); hipMalloc((void **)&d_typej, KLEN(int ,1)); //outputs hipMalloc((void **)&d_force, NLEN(double,3)); hipMalloc((void **)&d_torque, NLEN(double,3)); hipMalloc((void **)&d_fdelta, KLEN(double,3)); hipMalloc((void **)&d_tdeltai, KLEN(double,3)); hipMalloc((void **)&d_tdeltaj, KLEN(double,3)); } void free_dev_structures() { //packed hipFree(d_x); hipFree(d_v); hipFree(d_omega); hipFree(d_radius); hipFree(d_mass); 
hipFree(d_type); //unpacked(i) hipFree(d_xi); hipFree(d_vi); hipFree(d_omegai); hipFree(d_radiusi); hipFree(d_massi); hipFree(d_typei); //unpacked(j) hipFree(d_xj); hipFree(d_vj); hipFree(d_omegaj); hipFree(d_radiusj); hipFree(d_massj); hipFree(d_typej); //outputs hipFree(d_force); hipFree(d_torque); hipFree(d_fdelta); hipFree(d_tdeltai); hipFree(d_tdeltaj); } void run(struct params *input, int num_iter) { NeighListLike *nl = new NeighListLike(input); int block_size = BLOCK_SIZE; int nparticles = input->nnode; dim3 tpa_grid_size( min(nparticles/block_size, MAX_GRID_DIM), max((int)ceil(((float)nparticles/block_size)/MAX_GRID_DIM), 1)); int nneighbors = nl->maxpage * nl->pgsize; dim3 tpn_grid_size( min(nneighbors/block_size, MAX_GRID_DIM), max((int)ceil(((float)nneighbors/block_size)/MAX_GRID_DIM), 1)); #if DEBUG printf("block_size = %d\n", block_size); printf("nparticles = %d\n", nparticles); printf("nneighbors = %d -> %d (maxpage=%d, pgsize=%d)\n", input->nedge, nneighbors, nl->maxpage, nl->pgsize); printf("tpa_grid = { %d, %d, %d }\n", tpa_grid_size.x, tpa_grid_size.y, tpa_grid_size.z); printf("tpn_grid = { %d, %d, %d }\n", tpn_grid_size.x, tpn_grid_size.y, tpn_grid_size.z); #endif //ONE-TIME COSTS one_time.push_back(SimpleTimer("hertz_consts")); one_time.back().start(); setup_hertz_constants(input); one_time.back().stop_and_add_to_total(); no_cuda_error("hertz_constants"); one_time.push_back(SimpleTimer("init_nl")); one_time.back().start(); HertzCudaNeighList *d_nl = new HertzCudaNeighList( block_size, input->nnode, nl->maxpage, nl->pgsize); one_time.back().stop_and_add_to_total(); no_cuda_error("init_nl"); one_time.push_back(SimpleTimer("malloc")); one_time.back().start(); init_dev_structures(nparticles, nneighbors); one_time.back().stop_and_add_to_total(); no_cuda_error("init_dev_structures"); one_time.push_back(SimpleTimer("memcpy")); one_time.back().start(); hipMemcpy(d_force, input->force, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_torque, 
input->torque, NLEN(double,3), hipMemcpyHostToDevice); one_time.back().stop_and_add_to_total(); no_cuda_error("memcpy"); //NL-REFRESH COSTS nl_refresh.push_back(SimpleTimer("nl_reload")); nl_refresh.back().start(); d_nl->reload( nl->numneigh, nl->firstneigh, nl->pages, nl->maxpage, nl->dpages, nl->tpages); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("nl_reload"); nl_refresh.push_back(SimpleTimer("memcpy_unpack")); nl_refresh.back().start(); hipMemcpy(d_radius, input->radius, NLEN(double,1), hipMemcpyHostToDevice); hipMemcpy(d_mass, input->mass, NLEN(double,1), hipMemcpyHostToDevice); hipMemcpy(d_type, input->type, NLEN(int,1), hipMemcpyHostToDevice); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("memcpy_unpack"); nl_refresh.push_back(SimpleTimer("unpack_ro")); nl_refresh.back().start(); hipLaunchKernelGGL(( unpack_ro_data), dim3(tpn_grid_size), dim3(block_size), 0, 0, nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_radius, d_radiusi, d_radiusj, d_mass, d_massi, d_massj, d_type, d_typei, d_typej ); hipDeviceSynchronize(); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("unpack_ro"); // PER-ITER COSTS per_iter.push_back(SimpleTimer("memcpy_reload")); per_iter.push_back(SimpleTimer("unpack_reload")); per_iter.push_back(SimpleTimer("memset_delta")); per_iter.push_back(SimpleTimer("compute")); per_iter.push_back(SimpleTimer("collect")); per_iter.push_back(SimpleTimer("memcpy_results")); for (int i=0; i<(int)per_iter.size(); i++) { per_iter_timings.push_back(vector<double>(num_iter)); } double *force = new double[nparticles*3]; double *torque = new double[nparticles*3]; for (int run=0; run<num_iter; run++) { //make copies nl->restore(); d_nl->load_shear(nl->dpages); no_cuda_error("make_copies"); end_to_end.start(); //load data onto device per_iter[0].start(); hipMemcpy(d_x, input->x, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_v, input->v, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_omega, input->omega, 
NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_force, input->force, NLEN(double,3), hipMemcpyHostToDevice); hipMemcpy(d_torque, input->torque, NLEN(double,3), hipMemcpyHostToDevice); double d0 = per_iter[0].stop_and_add_to_total(); per_iter_timings[0][run] = d0; no_cuda_error("memcpy_reload"); //TODO: check if realloc of unpacked ij data necessary per_iter[1].start(); hipLaunchKernelGGL(( unpack_reload_data), dim3(tpn_grid_size), dim3(block_size), 0, 0, nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_x, d_xi, d_xj, d_v, d_vi, d_vj, d_omega, d_omegai, d_omegaj ); hipDeviceSynchronize(); double d1 = per_iter[1].stop_and_add_to_total(); per_iter_timings[1][run] = d1; no_cuda_error("unpack_reload"); per_iter[2].start(); hipMemset(d_fdelta, 0, KLEN(double,3)); hipMemset(d_tdeltai, 0, KLEN(double,3)); hipMemset(d_tdeltaj, 0, KLEN(double,3)); double d2 = per_iter[2].stop_and_add_to_total(); per_iter_timings[2][run] = d2; no_cuda_error("memset_delta"); per_iter[3].start(); #ifdef TRACE cudaPrintfInit(); #endif hipLaunchKernelGGL(( compute), dim3(tpn_grid_size), dim3(block_size), 0, 0, nneighbors, d_nl->d_valid, #ifdef TRACE d_nl->d_dati, d_nl->d_neighidx, #endif d_xi, d_xj, d_vi, d_vj, d_omegai, d_omegaj, d_radiusi, d_radiusj, d_massi, d_massj, d_typei, d_typej, //outputs d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_shear ); hipDeviceSynchronize(); double d3 = per_iter[3].stop_and_add_to_total(); per_iter_timings[3][run] = d3; no_cuda_error("compute"); #ifdef TRACE cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif per_iter[4].start(); hipLaunchKernelGGL(( collect), dim3(tpa_grid_size), dim3(block_size), 0, 0, nparticles, d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_offset, d_nl->d_numneigh, #if HALFNL d_nl->d_tad, d_nl->d_ffo, d_nl->d_nel, #endif d_force, d_torque); hipDeviceSynchronize(); double d4 = per_iter[4].stop_and_add_to_total(); per_iter_timings[4][run] = d4; no_cuda_error("collect"); //offload data from device //(see note on shear history below) 
per_iter[5].start(); hipMemcpy(force, d_force, NLEN(double,3), hipMemcpyDeviceToHost); hipMemcpy(torque, d_torque, NLEN(double,3), hipMemcpyDeviceToHost); double d5 = per_iter[5].stop_and_add_to_total(); per_iter_timings[5][run] = d5; no_cuda_error("memcpy_results"); double d6 = end_to_end.stop_and_add_to_total(); end_to_end_timings.push_back(d6); //NB: we assume that shear history is *not* required from the device //so this cost is not included in "memcpy_results" d_nl->unload_shear(nl->dpages); check_result(input, nl, force, torque, nl->firstdouble, /*threshold=*/0.5, /*verbose=*/false, /*die_on_flag=*/true); } delete[] force; delete[] torque; free_dev_structures(); no_cuda_error("free_dev_structures"); }
654598e0ee9c1b395074d9223ee2352f1ca1bf67.cu
#include "hertz_constants.h" #include "hertz_cudaneighlist.h" #include "pair_interaction.h" #include "framework.h" #ifdef TRACE #warning TRACE enabled: timing will not be accurate #include "cuPrintf.cu" #endif #ifndef MAX_GRID_DIM #error You need to #define MAX_GRID_DIM (see Makefile.config) #endif __device__ int get_gid() { return threadIdx.x + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.x * gridDim.x); } // -------------------------------------------------------------------------- // UNPACK PER-PARTICLE DATA // -------------------------------------------------------------------------- __global__ void unpack_ro_data( int K, int *valid, int *dati, int *datj, double *radius, double *radiusi, double *radiusj, double *mass, double *massi, double *massj, int *type, int *typei, int *typej ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; radiusi[gid] = radius[i]; radiusj[gid] = radius[j]; massi[gid] = mass[i]; massj[gid] = mass[j]; typei[gid] = type[i]; typej[gid] = type[j]; } } __global__ void unpack_reload_data( int K, int *valid, int *dati, int *datj, double *x, double *xi, double *xj, double *v, double *vi, double *vj, double *omega, double *omegai, double *omegaj ) { int gid = get_gid(); if (gid < K && valid[gid]) { int i = dati[gid]; int j = datj[gid]; xi[(gid*3)+0] = x[(i*3)+0]; xj[(gid*3)+0] = x[(j*3)+0]; xi[(gid*3)+1] = x[(i*3)+1]; xj[(gid*3)+1] = x[(j*3)+1]; xi[(gid*3)+2] = x[(i*3)+2]; xj[(gid*3)+2] = x[(j*3)+2]; vi[(gid*3)+0] = v[(i*3)+0]; vj[(gid*3)+0] = v[(j*3)+0]; vi[(gid*3)+1] = v[(i*3)+1]; vj[(gid*3)+1] = v[(j*3)+1]; vi[(gid*3)+2] = v[(i*3)+2]; vj[(gid*3)+2] = v[(j*3)+2]; omegai[(gid*3)+0] = omega[(i*3)+0]; omegaj[(gid*3)+0] = omega[(j*3)+0]; omegai[(gid*3)+1] = omega[(i*3)+1]; omegaj[(gid*3)+1] = omega[(j*3)+1]; omegai[(gid*3)+2] = omega[(i*3)+2]; omegaj[(gid*3)+2] = omega[(j*3)+2]; } } __global__ void compute( //inputs int K, int *valid, #ifdef TRACE int *dati, int *datj, #endif double *xi, double *xj, 
double *vi, double *vj, double *omegai, double *omegaj, double *radiusi, double *radiusj, double *massi, double *massj, int *typei, int *typej, //inouts double *fdelta, double *tdeltai, double *tdeltaj, double *shear ) { int gid = get_gid(); if (gid < K && valid[gid]) { pair_interaction( #ifdef TRACE dati[gid], datj[gid], #endif &xi[gid*3], &xj[gid*3], &vi[gid*3], &vj[gid*3], &omegai[gid*3], &omegaj[gid*3], radiusi[gid], radiusj[gid], massi[gid], massj[gid], typei[gid], typej[gid], &shear[gid*3], &fdelta[gid*3], /*fdeltaj is*/NULL, &tdeltai[gid*3], &tdeltaj[gid*3] ); } } __global__ void collect( //inputs int N, double *fdelta, double *tdeltai, double *tdeltaj, int *off, int *len, #if HALFNL int *tad, int *ffo, int *nel, #endif //inouts double *force, double *torque ) { int gid = get_gid(); double fsum[3] = {0,0,0}; double tsum[3] = {0,0,0}; if (gid < N) { int offset = off[gid]; for (int k=0; k<len[gid]; k++) { int idx = offset+k; fsum[0] += fdelta[(idx*3)+0]; fsum[1] += fdelta[(idx*3)+1]; fsum[2] += fdelta[(idx*3)+2]; tsum[0] += tdeltai[(idx*3)+0]; tsum[1] += tdeltai[(idx*3)+1]; tsum[2] += tdeltai[(idx*3)+2]; } #if HALFNL offset = ffo[gid]; for (int k=0; k<nel[gid]; k++) { int idx = tad[offset+k]; fsum[0] -= fdelta[(idx*3)+0]; fsum[1] -= fdelta[(idx*3)+1]; fsum[2] -= fdelta[(idx*3)+2]; tsum[0] += tdeltaj[(idx*3)+0]; tsum[1] += tdeltaj[(idx*3)+1]; tsum[2] += tdeltaj[(idx*3)+2]; } #endif force[(gid*3)] += fsum[0]; force[(gid*3)+1] += fsum[1]; force[(gid*3)+2] += fsum[2]; torque[(gid*3)] += tsum[0]; torque[(gid*3)+1] += tsum[1]; torque[(gid*3)+2] += tsum[2]; } } using namespace std; // DEVICE STRUCTURES // INPUTS // packed // unpacked(i) // unpacked(j) double *d_x; double *d_xi; double *d_xj; // ] reload double *d_v; double *d_vi; double *d_vj; // ] double *d_omega; double *d_omegai; double *d_omegaj; // ] double *d_radius; double *d_radiusi; double *d_radiusj; // ] ro double *d_mass; double *d_massi; double *d_massj; // ] int *d_type; int *d_typei; int *d_typej; // ] 
// OUTPUTS // packed // unpacked(i) // unpacked(j) double *d_force; double *d_fdelta; double *d_torque; double *d_tdeltai; double *d_tdeltaj; // d_shear in d_nl void no_cuda_error(const char *errmsg) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("[ERROR] %s\n", errmsg); printf("[ERROR] %d: %s\n", err, cudaGetErrorString(err)); size_t free; size_t total; if (cudaMemGetInfo(&free, &total) == cudaSuccess) { printf("[ERROR] mem free=%zubytes total=%zubytes\n", free, total); } exit(1); } } #define NLEN(type,arity) (nparticles*arity*sizeof(type)) #define KLEN(type,arity) (nneighbors*arity*sizeof(type)) void init_dev_structures(int nparticles, int nneighbors) { //packed cudaMalloc((void **)&d_x, NLEN(double,3)); cudaMalloc((void **)&d_v, NLEN(double,3)); cudaMalloc((void **)&d_omega, NLEN(double,3)); cudaMalloc((void **)&d_radius, NLEN(double,1)); cudaMalloc((void **)&d_mass, NLEN(double,1)); cudaMalloc((void **)&d_type, NLEN(int, 1)); //unpacked(i) cudaMalloc((void **)&d_xi, KLEN(double,3)); cudaMalloc((void **)&d_vi, KLEN(double,3)); cudaMalloc((void **)&d_omegai, KLEN(double,3)); cudaMalloc((void **)&d_radiusi, KLEN(double,1)); cudaMalloc((void **)&d_massi, KLEN(double,1)); cudaMalloc((void **)&d_typei, KLEN(int ,1)); //unpacked(j) cudaMalloc((void **)&d_xj, KLEN(double,3)); cudaMalloc((void **)&d_vj, KLEN(double,3)); cudaMalloc((void **)&d_omegaj, KLEN(double,3)); cudaMalloc((void **)&d_radiusj, KLEN(double,1)); cudaMalloc((void **)&d_massj, KLEN(double,1)); cudaMalloc((void **)&d_typej, KLEN(int ,1)); //outputs cudaMalloc((void **)&d_force, NLEN(double,3)); cudaMalloc((void **)&d_torque, NLEN(double,3)); cudaMalloc((void **)&d_fdelta, KLEN(double,3)); cudaMalloc((void **)&d_tdeltai, KLEN(double,3)); cudaMalloc((void **)&d_tdeltaj, KLEN(double,3)); } void free_dev_structures() { //packed cudaFree(d_x); cudaFree(d_v); cudaFree(d_omega); cudaFree(d_radius); cudaFree(d_mass); cudaFree(d_type); //unpacked(i) cudaFree(d_xi); cudaFree(d_vi); 
cudaFree(d_omegai); cudaFree(d_radiusi); cudaFree(d_massi); cudaFree(d_typei); //unpacked(j) cudaFree(d_xj); cudaFree(d_vj); cudaFree(d_omegaj); cudaFree(d_radiusj); cudaFree(d_massj); cudaFree(d_typej); //outputs cudaFree(d_force); cudaFree(d_torque); cudaFree(d_fdelta); cudaFree(d_tdeltai); cudaFree(d_tdeltaj); } void run(struct params *input, int num_iter) { NeighListLike *nl = new NeighListLike(input); int block_size = BLOCK_SIZE; int nparticles = input->nnode; dim3 tpa_grid_size( min(nparticles/block_size, MAX_GRID_DIM), max((int)ceil(((float)nparticles/block_size)/MAX_GRID_DIM), 1)); int nneighbors = nl->maxpage * nl->pgsize; dim3 tpn_grid_size( min(nneighbors/block_size, MAX_GRID_DIM), max((int)ceil(((float)nneighbors/block_size)/MAX_GRID_DIM), 1)); #if DEBUG printf("block_size = %d\n", block_size); printf("nparticles = %d\n", nparticles); printf("nneighbors = %d -> %d (maxpage=%d, pgsize=%d)\n", input->nedge, nneighbors, nl->maxpage, nl->pgsize); printf("tpa_grid = { %d, %d, %d }\n", tpa_grid_size.x, tpa_grid_size.y, tpa_grid_size.z); printf("tpn_grid = { %d, %d, %d }\n", tpn_grid_size.x, tpn_grid_size.y, tpn_grid_size.z); #endif //ONE-TIME COSTS one_time.push_back(SimpleTimer("hertz_consts")); one_time.back().start(); setup_hertz_constants(input); one_time.back().stop_and_add_to_total(); no_cuda_error("hertz_constants"); one_time.push_back(SimpleTimer("init_nl")); one_time.back().start(); HertzCudaNeighList *d_nl = new HertzCudaNeighList( block_size, input->nnode, nl->maxpage, nl->pgsize); one_time.back().stop_and_add_to_total(); no_cuda_error("init_nl"); one_time.push_back(SimpleTimer("malloc")); one_time.back().start(); init_dev_structures(nparticles, nneighbors); one_time.back().stop_and_add_to_total(); no_cuda_error("init_dev_structures"); one_time.push_back(SimpleTimer("memcpy")); one_time.back().start(); cudaMemcpy(d_force, input->force, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_torque, input->torque, NLEN(double,3), 
cudaMemcpyHostToDevice); one_time.back().stop_and_add_to_total(); no_cuda_error("memcpy"); //NL-REFRESH COSTS nl_refresh.push_back(SimpleTimer("nl_reload")); nl_refresh.back().start(); d_nl->reload( nl->numneigh, nl->firstneigh, nl->pages, nl->maxpage, nl->dpages, nl->tpages); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("nl_reload"); nl_refresh.push_back(SimpleTimer("memcpy_unpack")); nl_refresh.back().start(); cudaMemcpy(d_radius, input->radius, NLEN(double,1), cudaMemcpyHostToDevice); cudaMemcpy(d_mass, input->mass, NLEN(double,1), cudaMemcpyHostToDevice); cudaMemcpy(d_type, input->type, NLEN(int,1), cudaMemcpyHostToDevice); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("memcpy_unpack"); nl_refresh.push_back(SimpleTimer("unpack_ro")); nl_refresh.back().start(); unpack_ro_data<<<tpn_grid_size, block_size>>>( nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_radius, d_radiusi, d_radiusj, d_mass, d_massi, d_massj, d_type, d_typei, d_typej ); cudaThreadSynchronize(); nl_refresh.back().stop_and_add_to_total(); no_cuda_error("unpack_ro"); // PER-ITER COSTS per_iter.push_back(SimpleTimer("memcpy_reload")); per_iter.push_back(SimpleTimer("unpack_reload")); per_iter.push_back(SimpleTimer("memset_delta")); per_iter.push_back(SimpleTimer("compute")); per_iter.push_back(SimpleTimer("collect")); per_iter.push_back(SimpleTimer("memcpy_results")); for (int i=0; i<(int)per_iter.size(); i++) { per_iter_timings.push_back(vector<double>(num_iter)); } double *force = new double[nparticles*3]; double *torque = new double[nparticles*3]; for (int run=0; run<num_iter; run++) { //make copies nl->restore(); d_nl->load_shear(nl->dpages); no_cuda_error("make_copies"); end_to_end.start(); //load data onto device per_iter[0].start(); cudaMemcpy(d_x, input->x, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_v, input->v, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_omega, input->omega, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_force, 
input->force, NLEN(double,3), cudaMemcpyHostToDevice); cudaMemcpy(d_torque, input->torque, NLEN(double,3), cudaMemcpyHostToDevice); double d0 = per_iter[0].stop_and_add_to_total(); per_iter_timings[0][run] = d0; no_cuda_error("memcpy_reload"); //TODO: check if realloc of unpacked ij data necessary per_iter[1].start(); unpack_reload_data<<<tpn_grid_size, block_size>>>( nneighbors, d_nl->d_valid, d_nl->d_dati, d_nl->d_neighidx, d_x, d_xi, d_xj, d_v, d_vi, d_vj, d_omega, d_omegai, d_omegaj ); cudaThreadSynchronize(); double d1 = per_iter[1].stop_and_add_to_total(); per_iter_timings[1][run] = d1; no_cuda_error("unpack_reload"); per_iter[2].start(); cudaMemset(d_fdelta, 0, KLEN(double,3)); cudaMemset(d_tdeltai, 0, KLEN(double,3)); cudaMemset(d_tdeltaj, 0, KLEN(double,3)); double d2 = per_iter[2].stop_and_add_to_total(); per_iter_timings[2][run] = d2; no_cuda_error("memset_delta"); per_iter[3].start(); #ifdef TRACE cudaPrintfInit(); #endif compute<<<tpn_grid_size, block_size>>>( nneighbors, d_nl->d_valid, #ifdef TRACE d_nl->d_dati, d_nl->d_neighidx, #endif d_xi, d_xj, d_vi, d_vj, d_omegai, d_omegaj, d_radiusi, d_radiusj, d_massi, d_massj, d_typei, d_typej, //outputs d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_shear ); cudaThreadSynchronize(); double d3 = per_iter[3].stop_and_add_to_total(); per_iter_timings[3][run] = d3; no_cuda_error("compute"); #ifdef TRACE cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); #endif per_iter[4].start(); collect<<<tpa_grid_size, block_size>>>( nparticles, d_fdelta, d_tdeltai, d_tdeltaj, d_nl->d_offset, d_nl->d_numneigh, #if HALFNL d_nl->d_tad, d_nl->d_ffo, d_nl->d_nel, #endif d_force, d_torque); cudaThreadSynchronize(); double d4 = per_iter[4].stop_and_add_to_total(); per_iter_timings[4][run] = d4; no_cuda_error("collect"); //offload data from device //(see note on shear history below) per_iter[5].start(); cudaMemcpy(force, d_force, NLEN(double,3), cudaMemcpyDeviceToHost); cudaMemcpy(torque, d_torque, NLEN(double,3), cudaMemcpyDeviceToHost); 
double d5 = per_iter[5].stop_and_add_to_total(); per_iter_timings[5][run] = d5; no_cuda_error("memcpy_results"); double d6 = end_to_end.stop_and_add_to_total(); end_to_end_timings.push_back(d6); //NB: we assume that shear history is *not* required from the device //so this cost is not included in "memcpy_results" d_nl->unload_shear(nl->dpages); check_result(input, nl, force, torque, nl->firstdouble, /*threshold=*/0.5, /*verbose=*/false, /*die_on_flag=*/true); } delete[] force; delete[] torque; free_dev_structures(); no_cuda_error("free_dev_structures"); }
bb0d64594e0208f4626d6d76dba3695c55ddc5a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void lots_of_double_compute(double *inputs, int N, size_t niters, double *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for ( ; tid < N; tid += nthreads) { size_t iter; double val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0) - 101.0; val = (val / 3.0) + 102.0; val = (val + 1.07) - 103.0; val = (val / 1.037) + 104.0; val = (val + 3.00) - 105.0; val = (val / 0.22) + 106.0; } outputs[tid] = val; } }
bb0d64594e0208f4626d6d76dba3695c55ddc5a5.cu
#include "includes.h" __global__ void lots_of_double_compute(double *inputs, int N, size_t niters, double *outputs) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; size_t nthreads = gridDim.x * blockDim.x; for ( ; tid < N; tid += nthreads) { size_t iter; double val = inputs[tid]; for (iter = 0; iter < niters; iter++) { val = (val + 5.0) - 101.0; val = (val / 3.0) + 102.0; val = (val + 1.07) - 103.0; val = (val / 1.037) + 104.0; val = (val + 3.00) - 105.0; val = (val / 0.22) + 106.0; } outputs[tid] = val; } }
496be6c0262bff351cc752e79774ec868b8e5b22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //xfail:ASSERTION_ERROR //--blockDim=2 --gridDim=2 typedef struct { unsigned int a, b; } pair; __device__ void assertion(pair A) { __assert(false); } __global__ void test(pair A) { assertion(A); }
496be6c0262bff351cc752e79774ec868b8e5b22.cu
//xfail:ASSERTION_ERROR //--blockDim=2 --gridDim=2 typedef struct { unsigned int a, b; } pair; __device__ void assertion(pair A) { __assert(false); } __global__ void test(pair A) { assertion(A); }
bb78e136d0620f89339a15508f420c74366fac39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl_diag.cu normal z -> c, Fri Jan 30 19:00:09 2015 */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void clascl_diag_lower(int m, int n, magmaFloatComplex_const_ptr D, int ldd, magmaFloatComplex_ptr A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[j+j*ldd]; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void clascl_diag_upper(int m, int n, magmaFloatComplex_const_ptr D, int ldd, magmaFloatComplex_ptr A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[ind+ind*ldd]; } } /** Purpose ------- CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). 
\param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dD, magma_int_t lddd, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( clascl_diag_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( clascl_diag_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, lddd, dA, ldda); } } /** @see magmablas_clascl2_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl_diag( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dD, magma_int_t lddd, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_clascl_diag_q( type, m, n, dD, lddd, dA, ldda, info, magma_stream ); }
bb78e136d0620f89339a15508f420c74366fac39.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl_diag.cu normal z -> c, Fri Jan 30 19:00:09 2015 */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void clascl_diag_lower(int m, int n, magmaFloatComplex_const_ptr D, int ldd, magmaFloatComplex_ptr A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[j+j*ldd]; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void clascl_diag_upper(int m, int n, magmaFloatComplex_const_ptr D, int ldd, magmaFloatComplex_ptr A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] /= D[ind+ind*ldd]; } } /** Purpose ------- CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). 
\param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl_diag_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dD, magma_int_t lddd, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { clascl_diag_lower <<< grid, threads, 0, queue >>> (m, n, dD, lddd, dA, ldda); } else if (type == MagmaUpper) { clascl_diag_upper <<< grid, threads, 0, queue >>> (m, n, dD, lddd, dA, ldda); } } /** @see magmablas_clascl2_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl_diag( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dD, magma_int_t lddd, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_clascl_diag_q( type, m, n, dD, lddd, dA, ldda, info, magma_stream ); }
ec96a7bfec463da681f2f3d4509058bc5453e285.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; // idx of face float dist; // abs distance of pixel to face float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
__device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Faces with at least one vertex behind the camera won't render correctly // and should be removed or clipped before calling the rasterizer const bool z_invalid = zlims.x < kEpsilon; // Check if the current point is oustside the triangle bounding box. return ( pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || z_invalid); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxiliary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. 
template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int64_t* clipped_faces_neighbor_idx, // (F,) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. 
} // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } // Handle the case where a face (f) partially behind the image plane is // clipped to a quadrilateral and then split into two faces (t1, t2). In this // case we: // 1. Find the index of the neighboring face (e.g. for t1 need index of t2) // 2. Check if the neighboring face (t2) is already in the top K faces // 3. If yes, compare the distance of the pixel to t1 with the distance to t2. // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces. const int neighbor_idx = clipped_faces_neighbor_idx[face_idx]; int neighbor_idx_top_k = -1; // Check if neighboring face is already in the top K. // -1 is the fill value in clipped_faces_neighbor_idx if (neighbor_idx != -1) { // Only need to loop until q_size. for (int i = 0; i < q_size; i++) { if (q[i].idx == neighbor_idx) { neighbor_idx_top_k = i; break; } } } // If neighbor idx is not -1 then it is in the top K struct. if (neighbor_idx_top_k != -1) { // If dist of current face is less than neighbor then overwrite the // neighbor face values in the top K struct. float neighbor_dist = abs(q[neighbor_idx_top_k].dist); if (dist < neighbor_dist) { // Overwrite the neighbor face values q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip}; // If pz > q_max then overwrite the max values and index of the max. // q_size stays the same. if (pz > q_max_z) { q_max_z = pz; q_max_idx = neighbor_idx_top_k; } } } else { // Handle as a normal face if (q_size < K) { // Just insert it. 
q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const int64_t* clipped_faces_neighbor_idx, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordinates of pixel. const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). 
We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, clipped_faces_neighbor_idx, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const at::Tensor& clipped_faces_neighbor_idx, const std::tuple<int, int> image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); TORCH_CHECK( clipped_faces_neighbor_idx.size(0) == face_verts.size(0), "clipped_faces_neighbor_idx must have save size first dimension as face_verts"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}, clipped_faces_neighbor_idx_t{ clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t, clipped_faces_neighbor_idx_t}); // Set the device for the kernel launch based on the device of the input 
at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. 
const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, 
"pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, BH, BW, T) const int64_t* clipped_faces_neighbor_idx, // (F,) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int BH, const int BW, 
const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists, // (N, H, W, K) float* bary // (N, H, W, K, 3) ) { // This can be more than H * W if H or W are not divisible by bin_size. int num_pixels = N * BH * BW * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; // bin index y const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; // bin index y const int bx = i / (bin_size * bin_size); // pixel within the bin i %= bin_size * bin_size; // Pixel x, y indices const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. } // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. 
CheckPixelInsideFace( face_verts, clipped_faces_neighbor_idx, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const at::Tensor& clipped_faces_neighbor_idx, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); TORCH_CHECK( clipped_faces_neighbor_idx.size(0) == face_verts.size(0), "clipped_faces_neighbor_idx must have the same first dimension as face_verts"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}, clipped_faces_neighbor_idx_t{ clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU( c, {face_verts_t, bin_faces_t, 
clipped_faces_neighbor_idx_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // bin_faces shape (N, BH, BW, M) const int N = bin_faces.size(0); const int BH = bin_faces.size(1); const int BW = bin_faces.size(2); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, BH, BW, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
ec96a7bfec463da681f2f3d4509058bc5453e285.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; // idx of face float dist; // abs distance of pixel to face float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
__device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Faces with at least one vertex behind the camera won't render correctly // and should be removed or clipped before calling the rasterizer const bool z_invalid = zlims.x < kEpsilon; // Check if the current point is oustside the triangle bounding box. return ( pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || z_invalid); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxiliary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. 
template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int64_t* clipped_faces_neighbor_idx, // (F,) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. 
} // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } // Handle the case where a face (f) partially behind the image plane is // clipped to a quadrilateral and then split into two faces (t1, t2). In this // case we: // 1. Find the index of the neighboring face (e.g. for t1 need index of t2) // 2. Check if the neighboring face (t2) is already in the top K faces // 3. If yes, compare the distance of the pixel to t1 with the distance to t2. // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces. const int neighbor_idx = clipped_faces_neighbor_idx[face_idx]; int neighbor_idx_top_k = -1; // Check if neighboring face is already in the top K. // -1 is the fill value in clipped_faces_neighbor_idx if (neighbor_idx != -1) { // Only need to loop until q_size. for (int i = 0; i < q_size; i++) { if (q[i].idx == neighbor_idx) { neighbor_idx_top_k = i; break; } } } // If neighbor idx is not -1 then it is in the top K struct. if (neighbor_idx_top_k != -1) { // If dist of current face is less than neighbor then overwrite the // neighbor face values in the top K struct. float neighbor_dist = abs(q[neighbor_idx_top_k].dist); if (dist < neighbor_dist) { // Overwrite the neighbor face values q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip}; // If pz > q_max then overwrite the max values and index of the max. // q_size stays the same. if (pz > q_max_z) { q_max_z = pz; q_max_idx = neighbor_idx_top_k; } } } else { // Handle as a normal face if (q_size < K) { // Just insert it. 
q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const int64_t* clipped_faces_neighbor_idx, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordinates of pixel. const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). 
We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, clipped_faces_neighbor_idx, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const at::Tensor& clipped_faces_neighbor_idx, const std::tuple<int, int> image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); TORCH_CHECK( clipped_faces_neighbor_idx.size(0) == face_verts.size(0), "clipped_faces_neighbor_idx must have save size first dimension as face_verts"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}, clipped_faces_neighbor_idx_t{ clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t, clipped_faces_neighbor_idx_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard 
device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. 
const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, 
"pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, BH, BW, T) const int64_t* clipped_faces_neighbor_idx, // (F,) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int BH, const int BW, const int M, const int H, const int W, const int K, int64_t* 
face_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists, // (N, H, W, K) float* bary // (N, H, W, K, 3) ) { // This can be more than H * W if H or W are not divisible by bin_size. int num_pixels = N * BH * BW * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; // bin index y const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; // bin index y const int bx = i / (bin_size * bin_size); // pixel within the bin i %= bin_size * bin_size; // Pixel x, y indices const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. } // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. 
CheckPixelInsideFace( face_verts, clipped_faces_neighbor_idx, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const at::Tensor& clipped_faces_neighbor_idx, const std::tuple<int, int> image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); TORCH_CHECK( clipped_faces_neighbor_idx.size(0) == face_verts.size(0), "clipped_faces_neighbor_idx must have the same first dimension as face_verts"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}, clipped_faces_neighbor_idx_t{ clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU( c, {face_verts_t, bin_faces_t, 
clipped_faces_neighbor_idx_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // bin_faces shape (N, BH, BW, M) const int N = bin_faces.size(0); const int BH = bin_faces.size(1); const int BW = bin_faces.size(2); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, BH, BW, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
dafd6dcaea86b3ecd031fea422456fae47ad19d4.hip
// !!! This is a file automatically generated by hipify!!! // // ht_helix.cpp // // // Created by Lorenzo Rinaldi on 29/04/14. // // // compile: // nvcc -I/usr/local/cuda-5.5/samples/common/inc -I/usr/local/cuda-5.5/targets/x86_64-linux/include -gencode arch=compute_20,code=sm_21 -o ht_rhophi ht_rhophi.cu //NOTE: INVERTITE DIMENSIONI NRHO-NPHI PER ACCESSO MATRICE #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper utility functions #include "simpleIndexing.cu" #include <string.h> #include <cmath> #include <algorithm> #include <vector> #include <iostream> #include <fstream> #include <sstream> #include <unistd.h> using namespace std; #define NHMAX 300 #define Nsec 4 // Numero settori in piano trasverso #define Ntheta 16 // Numero settori in piano longitudinale #define Nphi 1024 // Numero bin angolo polare #define Nrho 1024 // Numero bin distanza radiale #define rhomin 500.f // mm #define rhomax 100000.f // mm #define phimin 0.f // rad #define phimax 2*M_PI // rad #define thetamin 0.f // rad #define thetamax M_PI // rad #define ac_soglia 4 // soglia nella matrice di accumulazione /* --- DEFINE TO ALTER EXECUTION --- */ //#define PARALLEL_REDUX_MAX //NOTE: still wrong!! 
do not use it #define VERBOSE_DUMP #define CUDA_MALLOCHOST_OUTPUT //#define CUDA_MANAGED_TRANSFER #define max_tracks_out 100 int acc_Mat [ Nsec ][ Ntheta ][ Nrho ] [Nphi ]; //int Max_rel [ Nsec ][ Ntheta ][Nphi ] [Nrho ]; int debug_accMat[ Nsec ][ Ntheta ][ Nrho ] [ Nphi ]; float dtheta= M_PI/Ntheta; float drho= (rhomax-rhomin)/Nrho; float dphi= (phimax-phimin)/Nphi; vector<float> x_values; vector<float> y_values; vector<float> z_values; #define OUT_VIEW_FRAME 3; #ifndef PARALLEL_REDUX_MAX struct track_param{ int acc; /*unsigned int isec; unsigned int ith; unsigned int iphi; unsigned int irho;*/ }; #ifndef CUDA_MALLOCHOST_OUTPUT struct track_param host_out_tracks[ Nsec * Ntheta * Nrho * Nphi ]; #endif #endif //lock definition #ifndef __LOCK_H__ #define __LOCK_H__ struct Lock { int *mutex; Lock( void ) { hipMalloc( (void**)&mutex, sizeof(int) ) ; hipMemset( mutex, 0, sizeof(int) ); } ~Lock( void ) { hipFree( mutex ); } __device__ void lock( void ) { while( atomicCAS( mutex, 0, 1 ) != 0 ); } __device__ void unlock( void ) { atomicExch( mutex, 0 ); } }; #endif //end lock void read_inputFile(string file_path, unsigned int num_hits); // CUDA timer macros hipEvent_t c_start, c_stop; inline void start_time() { hipEventCreate(&c_start); hipEventCreate(&c_stop); hipEventRecord(c_start, 0); } inline float stop_time(const char *msg) { float elapsedTime = 0; hipEventRecord(c_stop, 0); hipEventSynchronize(c_stop); hipEventElapsedTime(&elapsedTime, c_start, c_stop); //printf("Time to %s: %.3f ms\n", msg, elapsedTime); hipEventDestroy(c_start); hipEventDestroy(c_stop); return elapsedTime; } //#define floatToInt(x) (((x) >= 0) ? 
(int)((x) + 0.5) : (int)((x) - 0.5)) #define get4DIndex(s,t,r,p) ((s)*(Ntheta*Nrho*Nphi))+(((t)*Nrho*Nphi) +(((r)*Nphi)+(p))) #define get2DIndex(r,p) (((r)*Nphi)+(p)) __global__ void voteHoughSpace(float *dev_x_values, float *dev_y_values, float *dev_z_values, int *dev_accMat, float dtheta, float drho, float dphi){ __shared__ float x_val; __shared__ float y_val; __shared__ float z_val; if(threadIdx.x == 0){ x_val = dev_x_values[blockIdx.x]; y_val = dev_y_values[blockIdx.x]; z_val = dev_z_values[blockIdx.x]; } __syncthreads(); float R2 = x_val*x_val + y_val*y_val; float theta=acos(z_val/sqrt(R2+z_val*z_val)); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_val,x_val); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor((sec/2/M_PI*Nsec)); int iphi = threadIdx.x; float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_val*cos(phi)+y_val*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); int accu_index = get4DIndex(isec, ith, irho, iphi);//(isec*(Ntheta*Nphi*Nrho))+((ith*Nphi*Nrho) +((iphi*Nrho)+irho)); if (rho<=rhomax && rho>rhomin) { atomicAdd(&(dev_accMat[accu_index]),1); } } #ifndef PARALLEL_REDUX_MAX __global__ void findRelativeMax_withShared(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ extern __shared__ int SH_local_accMat[]; //check if it is a local maxima by verifying that it is greater then (>=) its neighboors /*unsigned int index_Y0 = get2DIndex(0,iphi); unsigned int index_Y1 = get2DIndex(1,iphi); unsigned int index_Y2 = get2DIndex(2,iphi);*/ unsigned int index_Y1 = iphi; SH_local_accMat[index_Y1] = 
dev_accMat[get4DIndex(isec, ith, irho, iphi)]; //save into shared memory this thread accumulator //In order to avoid oppressing global memory access, we delegate upper and lower rows, irho+1 and irho-1, loading into shared memory //only to those threads which passes the first "cut" on threshold //__syncthreads(); //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ if (SH_local_accMat[index_Y1] >= ac_soglia){ //we're sure that each thread has its own acc saved in shared memory /*SH_local_accMat[index_Y0] = dev_accMat[get4DIndex(isec, ith, irho-1, iphi)]; SH_local_accMat[index_Y2] = dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]; __syncthreads();*/ //NOTE: since we only access once (irho-1,iphi) and (irho+1,iphi) for this computation, and there isn't any reuse for other //threads of these informations, we don't need to put the other two rows in shared memory //(x,y) > (x,y-1) && (x,y) >= (x,y+1) /*if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y0] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y2]){*/ if(SH_local_accMat[index_Y1] > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && SH_local_accMat[index_Y1] >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ //__syncthreads(); //this is just to make sure that all threads had written in the shared memory, before reading each other values //(x,y) > (x-1, y) && (x,y) >= (x+1, y) if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y1-1] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y1+1]){ /*atomicAdd(&local_NMrel, 1);*/ //NOTE atomic op on shared memory are SLOWER than global memory, because they're implemented in software atomicAdd(NMrel, 1); dev_output[globalIndex].acc = SH_local_accMat[index_Y1]; //dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ } } } } } __global__ void 
findRelativeMax(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ //check if it is a local maxima by verifying that it is greater then (>=) its neighboors //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ //each thread is assigned to one point of the accum. matrix: int acc= dev_accMat[get4DIndex(isec, ith, irho, iphi)]; if (acc >= ac_soglia){ if(acc > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && acc >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ if(acc > dev_accMat[get4DIndex(isec, ith, irho, iphi-1)] && acc >= dev_accMat[get4DIndex(isec, ith, irho, iphi+1)]){ /*atomicAdd(&local_NMrel, 1); if(threadIdx.x == 0){ mutex.lock(); *NMrel += local_NMrel; mutex.unlock(); }*/ atomicAdd(NMrel, 1); //mutex.lock(); dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ //mutex.unlock(); } } } } } #else //NOTE: wrong approach to solve this problem //TODO: improve as on slides __global__ void reduceParallelMax(int *dev_accMat, int *dev_output, int *dev_maxRelOutput, unsigned int N){ extern __shared__ int sdata[]; int* max_sdata = (int *) sdata; int* relMax_sdata = (int *) &sdata[blockDim.x]; //each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; //local index //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; //global index (1D grid - 1D block) unsigned int i = getGlobalIdx_2D_1D(); if(i < N){ //check if thread is in data bounds max_sdata[tid] = dev_accMat[i]; 
relMax_sdata[tid] = dev_accMat[i]; __syncthreads(); //do reduction in shared memory for(unsigned int s=1; s < blockDim.x; s*=2){ if(tid % (2*s) == 0){ //it is for a different stride //atomicMax(&(max_sdata[tid]),max_sdata[tid+s]); //TODO: change without atomic max_sdata[tid] = (max_sdata[tid] > max_sdata[tid+s]) ? max_sdata[tid] : max_sdata[tid+s]; __syncthreads(); } __syncthreads(); } //write results (now found in the first element of the array) for this block to global memory //if(tid == 0) dev_output[blockIdx.x] = sdata[0]; if(tid == 0) dev_output[blockIdx.x] = max_sdata[0]; //at sdata[0], we found the maximum if(relMax_sdata[tid] >= ac_soglia){ dev_maxRelOutput[i] = relMax_sdata[tid]; }else{ dev_maxRelOutput[i] = 0; } } } #endif void help(char* prog) { printf("Use %s [-l #loops] [-n #hitsToRead] [-h] \n\n", prog); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -n hits Number of hits to read from input file (Default: 236).\n"); printf(" -h This help.\n"); } int main(int argc, char* argv[]){ unsigned int N_LOOPS = 1; unsigned int N_HITS = 236; int c; //getting command line options while ( (c = getopt(argc, argv, "l:n:h")) != -1 ) { switch(c) { case 'n': N_HITS = atoi(optarg); break; case 'l': N_LOOPS = atoi(optarg); break; case 'h': help(argv[0]); return 0; break; default: printf("Unkown option!\n"); help(argv[0]); return 0; } } int GPU_N; checkCudaErrors(hipGetDeviceCount(&GPU_N)); hipDeviceProp_t *deviceProp; deviceProp = (hipDeviceProp_t *) malloc(sizeof(hipDeviceProp_t)*GPU_N); for(unsigned int i = 0; i < GPU_N; i++){ checkCudaErrors(hipGetDeviceProperties(&deviceProp[i], i)); cout << deviceProp[i].name << endl; } #ifndef CUDA_MANAGED_TRANSFER struct track_param *host_out_tracks; start_time(); #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipHostMalloc((void **) &host_out_tracks, (sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)))); #else host_out_tracks = malloc(sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)); #endif 
float init_outputMatrix = stop_time("init output matrix with hipHostMalloc"); cout << "time to init output matrix (once): " << init_outputMatrix << endl; #endif int *dev_accMat; float *dev_x_values; float *dev_y_values; float *dev_z_values; float *x_values_temp; float *y_values_temp; float *z_values_temp; //executions loop for(unsigned int loop = 0; loop < N_LOOPS; loop++){ float timing[5]; //float R = 0.f; // Inizializzo a zero le matrici memset(&acc_Mat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); memset(&debug_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); //memset(&Max_rel, 0, (sizeof(int)*(Nsec*Ntheta*Nphi*Nrho)) ); //alloc accumulator matrix on GPU start_time(); checkCudaErrors(hipMalloc((void **) &dev_accMat, (sizeof(int)* (Nsec * Ntheta * Nrho*Nphi)) )); checkCudaErrors(hipMemset(dev_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)))); timing[1] = stop_time("malloc dev_accMat and memset(0)"); //riempi i valori dentro x_values , y_values , z_values read_inputFile("hits-5000.txt", N_HITS); // read_inputFile("../datafiles/hits-1.txt"); #ifdef CUDA_MANAGED_TRANSFER int cudaVer = 0; hipRuntimeGetVersion(&cudaVer); if(cudaVer >= 6000){ start_time(); checkCudaErrors(hipMallocManaged(&dev_x_values,sizeof(float)*x_values.size())); checkCudaErrors(hipMallocManaged(&dev_y_values,sizeof(float)*y_values.size())); checkCudaErrors(hipMallocManaged(&dev_z_values,sizeof(float)*z_values.size())); for(unsigned int i = 0; i < x_values.size(); i++){ dev_x_values[i] = x_values.at(i); dev_y_values[i] = y_values.at(i); dev_z_values[i] = z_values.at(i); } timing[0] = stop_time("Input malloc and copy HtoD"); }else{ #endif x_values_temp = (float*) malloc(sizeof(float)*x_values.size()); y_values_temp = (float*) malloc(sizeof(float)*y_values.size()); z_values_temp = (float*) malloc( sizeof(float)*z_values.size()); for(unsigned int i = 0; i < x_values.size(); i++){ x_values_temp[i] = x_values.at(i); y_values_temp[i] = y_values.at(i); z_values_temp[i] = z_values.at(i); } 
start_time(); checkCudaErrors(hipMalloc((void **) &dev_x_values, sizeof(float)*x_values.size())); checkCudaErrors(hipMalloc((void **) &dev_y_values, sizeof(float)*y_values.size())); checkCudaErrors(hipMalloc((void **) &dev_z_values, sizeof(float)*z_values.size())); checkCudaErrors(hipMemcpy(dev_x_values, x_values_temp, sizeof(float)*x_values.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_y_values, y_values_temp, sizeof(float)*y_values.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_z_values, z_values_temp, sizeof(float)*z_values.size(), hipMemcpyHostToDevice)); timing[0] = stop_time("Input malloc and copy HtoD"); #ifdef CUDA_MANAGED_TRANSFER } #endif start_time(); hipLaunchKernelGGL(( voteHoughSpace) , dim3(x_values.size()), dim3(Nphi), 0, 0, dev_x_values, dev_y_values, dev_z_values, dev_accMat, dtheta, drho, dphi); //assumes that Nphi == Nrho timing[2] = stop_time("Vote"); #ifdef VERBOSE_DUMP checkCudaErrors(hipMemcpy((void *) &debug_accMat, dev_accMat, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)), hipMemcpyDeviceToHost)); #endif //CPU execution for(unsigned int i = 0; i < x_values.size(); i++){ //cout << x_values.at(i) << " - "; //cout << y_values.at(i) << endl; float R2=x_values.at(i)*x_values.at(i)+y_values.at(i)*y_values.at(i); float theta=acos(z_values.at(i)/sqrt(R2+z_values.at(i)*z_values.at(i))); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_values.at(i),x_values.at(i)); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor(sec/2/M_PI*Nsec); for(int iphi = 0; iphi < Nphi; iphi++){ float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_values.at(i)*cos(phi)+y_values.at(i)*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); if (rho<=rhomax && rho>rhomin) { acc_Mat[isec][ith][irho][iphi]++; } } } #ifdef VERBOSE_DUMP //check unsigned int corretto = 0; unsigned int errore = 0; unsigned int letto = 0; for(unsigned int isec = 0; isec < 
Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[isec][ith][irho][iphi] != debug_accMat[isec][ith][irho][iphi]){ printf("diverso acc_Mat[%d][%d][%d][%d] %d - debug_accMat[%d][%d][%d][%d] %d \n", isec, ith, irho, iphi, acc_Mat[isec][ith][irho][iphi], isec, ith, irho, iphi, debug_accMat[isec][ith][irho][iphi]); errore++; }else corretto++; letto++; } } } } printf("corretti %d sbaglati %d; letti %d\n", corretto, errore, letto); /*for(unsigned int i = 0; i < Nsec; i++){ cout << "sec " << i << ":" << endl; for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[i][ith][iphi][irho] != 0) cout << "accMat[get3DIndex(" << ith << ", " << iphi << ", " << irho << ") = " << acc_Mat[i][ith][iphi][irho] << endl; } } } }*/ #endif checkCudaErrors(hipFree(dev_x_values)); checkCudaErrors(hipFree(dev_y_values)); checkCudaErrors(hipFree(dev_z_values)); #ifndef CUDA_MANAGED_TRANSFER free(x_values_temp); free(y_values_temp); free(z_values_temp); #endif x_values.clear(); y_values.clear(); z_values.clear(); //trova il massimo relativo unsigned int host_NMrel = 0; // --- Prendiamo le informazioni specifiche della GPU per la divisione del lavoro appropriata unsigned int maxThreadsPerBlock = deviceProp[0].maxThreadsPerBlock; #ifndef PARALLEL_REDUX_MAX struct track_param *dev_indexOutput; Lock my_lock; unsigned int *NMrel; start_time(); #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ checkCudaErrors(hipMallocManaged(&dev_indexOutput,(sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi)) )); checkCudaErrors(hipMallocManaged(&NMrel,sizeof(unsigned int) )); *NMrel = 0; }else{ #endif checkCudaErrors(hipMalloc((void **) &NMrel, (sizeof(unsigned int)))); checkCudaErrors(hipMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(hipMalloc((void **) &dev_indexOutput, 
(sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )) )); #ifdef CUDA_MANAGED_TRANSFER } #endif checkCudaErrors(hipMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); timing[1] += stop_time("malloc dev_indexOutput+NMrel and memset"); // dividiamo adeguatamente il lavoro // in base al numero massimo di thread disponibili in un singolo thread-block unsigned int dim_x_block = Nphi; unsigned int dim_y_block = maxThreadsPerBlock/dim_x_block; unsigned int dim_x_grid = Nsec; unsigned int dim_y_grid = Ntheta; unsigned int dim_z_grid = (Nrho/dim_y_block); dim3 grid(dim_x_grid, dim_y_grid, dim_z_grid); dim3 block(dim_x_block, dim_y_block); start_time(); hipLaunchKernelGGL(( findRelativeMax), dim3(grid), dim3(block), 0, 0, dev_accMat, dev_indexOutput, NMrel); timing[3] = stop_time("Max. Relative"); size_t block_shMemsize = dim_x_block * dim_y_block * sizeof(int); //block_shMemsize *= OUT_VIEW_FRAME; //add more cells to each block shared-memory bank cout << "sh memsize " << block_shMemsize << endl; checkCudaErrors(hipMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(hipMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); hipLaunchKernelGGL(( findRelativeMax_withShared) , dim3(grid), dim3(block), block_shMemsize, 0, dev_accMat, dev_indexOutput, NMrel); start_time(); #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipMemcpy((void *) host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), hipMemcpyDeviceToHost)); #else checkCudaErrors(hipMemcpy((void *) &host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), hipMemcpyDeviceToHost)); #endif #endif #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ host_NMrel = *NMrel; }else{ #endif checkCudaErrors(hipMemcpy((void *) &host_NMrel, NMrel, (sizeof(int)), hipMemcpyDeviceToHost)); #ifdef CUDA_MANAGED_TRANSFER } #endif timing[4] = stop_time("Copy results DtoH"); #ifdef 
VERBOSE_DUMP cout << "NMrel from GPU "<< host_NMrel << endl; unsigned int ntracks = 0; /*for(unsigned int i = 0; ((i < (Nsec * Ntheta * Nphi * Nrho)) && (ntracks < host_NMrel)); i++){ #ifndef CUDA_MANAGED_TRANSFER if(host_out_tracks[i].acc > -1){ cout << "track " << ntracks << " host_out_tracks value = " << host_out_tracks[i].acc << " [" << i << "]" << endl; ntracks++; } #else if(dev_indexOutput[i].acc > -1){ cout << "track " << ntracks << " dev_indexOutput value = " << dev_indexOutput[i].acc << " [" << i << "]" << endl; ntracks++; } #endif }*/ #endif //free mem checkCudaErrors(hipFree(dev_indexOutput)); checkCudaErrors(hipFree(NMrel)); //print timing results with this format: // NHIT HtoD_input MEMSET_cumulative VOTE MAX_REL DtoH_output cout << N_HITS << " " << timing[0] << " " << timing[1] << " " << timing[2] << " " << timing[3] << " " << timing[4] << endl; #else #define SET_GRID_DIM(npoints, threadsPerBlock) ceil((npoints+((threadsPerBlock)-1))/(threadsPerBlock)) unsigned int half_grid = SET_GRID_DIM((Nsec*Ntheta*Nphi*Nrho), maxThreadsPerBlock)/2; dim3 grid(half_grid, 2); unsigned int n_blocks = half_grid * 2; int * dev_maxBlockOutput; checkCudaErrors(hipMalloc((void **) &dev_maxBlockOutput, (sizeof(int) * n_blocks))); int * dev_maxRelOutput; checkCudaErrors(hipMalloc((void **) &dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)))); hipLaunchKernelGGL(( reduceParallelMax), dim3(grid), dim3(maxThreadsPerBlock), 2*(maxThreadsPerBlock*sizeof(int)), 0, dev_accMat, dev_maxBlockOutput, dev_maxRelOutput, (Nsec*Ntheta*Nphi*Nrho)); int *host_maxBlockOutput = (int *) malloc((sizeof(int)* n_blocks)); checkCudaErrors(hipMemcpy(host_maxBlockOutput, dev_maxBlockOutput, (sizeof(int) * n_blocks), hipMemcpyDeviceToHost)); int *host_maxRelOutput = (int *) malloc((sizeof(int)* (Nsec*Ntheta*Nphi*Nrho))); checkCudaErrors(hipMemcpy(host_maxRelOutput, dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)), hipMemcpyDeviceToHost)); unsigned int debug = 0; for(unsigned int i = 
0; i < n_blocks; i++){ if(host_maxBlockOutput[i] != 0){ cout << "block " << i << " max: " << host_maxBlockOutput[i] << " [" << i*maxThreadsPerBlock << "]" << endl; host_NMrel++; } unsigned int found = 0; for(unsigned int y = 0; y < maxThreadsPerBlock; y++){ unsigned int globalIndex = (y+(i*maxThreadsPerBlock)); if((host_maxRelOutput[globalIndex] != 0)) { cout << "out["<< globalIndex << "]="<< host_maxRelOutput[globalIndex]<< " "; found++; debug++; } } if(found > 0) cout << " (block "<< i << ")" << endl << endl; } /*for(unsigned int i = 0; i < (Nsec*Ntheta*Nphi*Nrho); i += maxThreadsPerBlock){ if(host_maxBlockOutput[i] != 0) cout << "block" << i/maxThreadsPerBlock << " max: " << host_maxBlockOutput[i] << " [" << i << "]" << endl; unsigned int found = 0; for(unsigned int y = 0; y < (maxThreadsPerBlock); y++){ // check relative maxima if((host_maxRelOutput[i+y] != 0)){ cout << "out["<< i+y << "]="<< host_maxRelOutput[i+y]<< " "; found++; host_NMrel++;} } if(found > 0) cout << endl << endl; }*/ cout << "NMrel from GPU "<< host_NMrel << " " << debug << endl; hipFree(dev_maxBlockOutput); hipFree(dev_maxRelOutput); free(host_maxBlockOutput); free(host_maxRelOutput); #endif host_NMrel = 0; int accumax = -1; int iphiMax = 0; int irhoMax = 0; int ithMax = 0; int isecMax = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 1; iphi < Nphi-1; iphi++){ for(unsigned int irho = 1; irho < Nrho-1; irho++){ float acc=acc_Mat[isec][ith][irho][iphi]; if (acc >= ac_soglia){ if (acc > accumax){ accumax=acc; } /*if (acc>acc_Mat[isec][ith-1][iphi][irho] && acc >= acc_Mat[isec][ith+1][iphi][irho]){ if (acc>acc_Mat[isec][ith][iphi-1][irho-1] && acc >= acc_Mat[isec][ith][iphi-1][irho+1]){ //TODO: chiedi a Lorenzo perch [iphi+1][irho+1] invece di [iphi-1][irho+1] if (acc>acc_Mat[isec][ith][iphi][irho-1] && acc >= acc_Mat[isec][ith][iphi][irho+1]){ if (acc>acc_Mat[isec][ith][iphi+1][irho-1] && acc >= 
acc_Mat[isec][ith][iphi+1][irho+1]){*/ if(acc > acc_Mat[isec][ith][irho-1][iphi] && acc >= acc_Mat[isec][ith][irho+1][iphi]){ if(acc > acc_Mat[isec][ith][irho][iphi-1] && acc >= acc_Mat[isec][ith][irho][iphi+1]){ //if (acc>=acc_Mat[isec][ith][irho][iphi+1] ){ accumax = acc_Mat[isec][ith][irho][iphi+1]; //Max_rel[isec][ith][irho][iphi+1]=1; host_NMrel++; ithMax=ith; irhoMax=irho; iphiMax=iphi; isecMax=isec+1; float t_th=(thetamin+ithMax*dtheta)*360.f/M_PI; float t_rho=rhomin+irhoMax*drho; float t_phi=phimin+iphiMax*dphi; //float q=t_rho/sin(t_phi); //float xm=-1/tan(t_phi); //cout << acc <<" "<< t_rho <<" "<< t_phi << " " << isecMax << endl; //} //} //} } } } } } } } #ifdef VERBOSE_DUMP cout << "NMrel from CPU "<< host_NMrel << endl; #endif checkCudaErrors(hipFree(dev_accMat)); } #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipHostFree(host_out_tracks)); #endif #endif return 0; } /***************************** * file opener *****************************/ void read_inputFile(string file_path, unsigned int num_hits) { ifstream input_f; string line; string value; stringstream ss; unsigned int val_iter; unsigned int line_read = 0; input_f.open(file_path.c_str()); if (input_f.is_open()) { while ( getline (input_f,line) && (line_read < num_hits) ) { val_iter = 0; ss.str(line); //prendiamo dati direttamente dal file ASCII in input while(ss >> value){ //i valori che ci interessano sono X, Y e Z if (val_iter == 0) x_values.push_back(atof(value.c_str())); else if (val_iter == 1) y_values.push_back(atof(value.c_str())); else if (val_iter == 2) z_values.push_back(atof(value.c_str())); val_iter++; } ss.clear(); line_read++; } input_f.close(); } }
dafd6dcaea86b3ecd031fea422456fae47ad19d4.cu
// // ht_helix.cpp // // // Created by Lorenzo Rinaldi on 29/04/14. // // // compile: // nvcc -I/usr/local/cuda-5.5/samples/common/inc -I/usr/local/cuda-5.5/targets/x86_64-linux/include -gencode arch=compute_20,code=sm_21 -o ht_rhophi ht_rhophi.cu //NOTE: INVERTITE DIMENSIONI NRHO-NPHI PER ACCESSO MATRICE #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper utility functions #include "simpleIndexing.cu" #include <string.h> #include <cmath> #include <algorithm> #include <vector> #include <iostream> #include <fstream> #include <sstream> #include <unistd.h> using namespace std; #define NHMAX 300 #define Nsec 4 // Numero settori in piano trasverso #define Ntheta 16 // Numero settori in piano longitudinale #define Nphi 1024 // Numero bin angolo polare #define Nrho 1024 // Numero bin distanza radiale #define rhomin 500.f // mm #define rhomax 100000.f // mm #define phimin 0.f // rad #define phimax 2*M_PI // rad #define thetamin 0.f // rad #define thetamax M_PI // rad #define ac_soglia 4 // soglia nella matrice di accumulazione /* --- DEFINE TO ALTER EXECUTION --- */ //#define PARALLEL_REDUX_MAX //NOTE: still wrong!! 
do not use it #define VERBOSE_DUMP #define CUDA_MALLOCHOST_OUTPUT //#define CUDA_MANAGED_TRANSFER #define max_tracks_out 100 int acc_Mat [ Nsec ][ Ntheta ][ Nrho ] [Nphi ]; //int Max_rel [ Nsec ][ Ntheta ][Nphi ] [Nrho ]; int debug_accMat[ Nsec ][ Ntheta ][ Nrho ] [ Nphi ]; float dtheta= M_PI/Ntheta; float drho= (rhomax-rhomin)/Nrho; float dphi= (phimax-phimin)/Nphi; vector<float> x_values; vector<float> y_values; vector<float> z_values; #define OUT_VIEW_FRAME 3; #ifndef PARALLEL_REDUX_MAX struct track_param{ int acc; /*unsigned int isec; unsigned int ith; unsigned int iphi; unsigned int irho;*/ }; #ifndef CUDA_MALLOCHOST_OUTPUT struct track_param host_out_tracks[ Nsec * Ntheta * Nrho * Nphi ]; #endif #endif //lock definition #ifndef __LOCK_H__ #define __LOCK_H__ struct Lock { int *mutex; Lock( void ) { cudaMalloc( (void**)&mutex, sizeof(int) ) ; cudaMemset( mutex, 0, sizeof(int) ); } ~Lock( void ) { cudaFree( mutex ); } __device__ void lock( void ) { while( atomicCAS( mutex, 0, 1 ) != 0 ); } __device__ void unlock( void ) { atomicExch( mutex, 0 ); } }; #endif //end lock void read_inputFile(string file_path, unsigned int num_hits); // CUDA timer macros cudaEvent_t c_start, c_stop; inline void start_time() { cudaEventCreate(&c_start); cudaEventCreate(&c_stop); cudaEventRecord(c_start, 0); } inline float stop_time(const char *msg) { float elapsedTime = 0; cudaEventRecord(c_stop, 0); cudaEventSynchronize(c_stop); cudaEventElapsedTime(&elapsedTime, c_start, c_stop); //printf("Time to %s: %.3f ms\n", msg, elapsedTime); cudaEventDestroy(c_start); cudaEventDestroy(c_stop); return elapsedTime; } //#define floatToInt(x) (((x) >= 0) ? 
(int)((x) + 0.5) : (int)((x) - 0.5)) #define get4DIndex(s,t,r,p) ((s)*(Ntheta*Nrho*Nphi))+(((t)*Nrho*Nphi) +(((r)*Nphi)+(p))) #define get2DIndex(r,p) (((r)*Nphi)+(p)) __global__ void voteHoughSpace(float *dev_x_values, float *dev_y_values, float *dev_z_values, int *dev_accMat, float dtheta, float drho, float dphi){ __shared__ float x_val; __shared__ float y_val; __shared__ float z_val; if(threadIdx.x == 0){ x_val = dev_x_values[blockIdx.x]; y_val = dev_y_values[blockIdx.x]; z_val = dev_z_values[blockIdx.x]; } __syncthreads(); float R2 = x_val*x_val + y_val*y_val; float theta=acos(z_val/sqrt(R2+z_val*z_val)); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_val,x_val); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor((sec/2/M_PI*Nsec)); int iphi = threadIdx.x; float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_val*cos(phi)+y_val*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); int accu_index = get4DIndex(isec, ith, irho, iphi);//(isec*(Ntheta*Nphi*Nrho))+((ith*Nphi*Nrho) +((iphi*Nrho)+irho)); if (rho<=rhomax && rho>rhomin) { atomicAdd(&(dev_accMat[accu_index]),1); } } #ifndef PARALLEL_REDUX_MAX __global__ void findRelativeMax_withShared(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ extern __shared__ int SH_local_accMat[]; //check if it is a local maxima by verifying that it is greater then (>=) its neighboors /*unsigned int index_Y0 = get2DIndex(0,iphi); unsigned int index_Y1 = get2DIndex(1,iphi); unsigned int index_Y2 = get2DIndex(2,iphi);*/ unsigned int index_Y1 = iphi; SH_local_accMat[index_Y1] = 
dev_accMat[get4DIndex(isec, ith, irho, iphi)]; //save into shared memory this thread accumulator //In order to avoid oppressing global memory access, we delegate upper and lower rows, irho+1 and irho-1, loading into shared memory //only to those threads which passes the first "cut" on threshold //__syncthreads(); //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ if (SH_local_accMat[index_Y1] >= ac_soglia){ //we're sure that each thread has its own acc saved in shared memory /*SH_local_accMat[index_Y0] = dev_accMat[get4DIndex(isec, ith, irho-1, iphi)]; SH_local_accMat[index_Y2] = dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]; __syncthreads();*/ //NOTE: since we only access once (irho-1,iphi) and (irho+1,iphi) for this computation, and there isn't any reuse for other //threads of these informations, we don't need to put the other two rows in shared memory //(x,y) > (x,y-1) && (x,y) >= (x,y+1) /*if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y0] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y2]){*/ if(SH_local_accMat[index_Y1] > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && SH_local_accMat[index_Y1] >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ //__syncthreads(); //this is just to make sure that all threads had written in the shared memory, before reading each other values //(x,y) > (x-1, y) && (x,y) >= (x+1, y) if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y1-1] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y1+1]){ /*atomicAdd(&local_NMrel, 1);*/ //NOTE atomic op on shared memory are SLOWER than global memory, because they're implemented in software atomicAdd(NMrel, 1); dev_output[globalIndex].acc = SH_local_accMat[index_Y1]; //dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ } } } } } __global__ void 
findRelativeMax(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ //check if it is a local maxima by verifying that it is greater then (>=) its neighboors //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ //each thread is assigned to one point of the accum. matrix: int acc= dev_accMat[get4DIndex(isec, ith, irho, iphi)]; if (acc >= ac_soglia){ if(acc > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && acc >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ if(acc > dev_accMat[get4DIndex(isec, ith, irho, iphi-1)] && acc >= dev_accMat[get4DIndex(isec, ith, irho, iphi+1)]){ /*atomicAdd(&local_NMrel, 1); if(threadIdx.x == 0){ mutex.lock(); *NMrel += local_NMrel; mutex.unlock(); }*/ atomicAdd(NMrel, 1); //mutex.lock(); dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ //mutex.unlock(); } } } } } #else //NOTE: wrong approach to solve this problem //TODO: improve as on slides __global__ void reduceParallelMax(int *dev_accMat, int *dev_output, int *dev_maxRelOutput, unsigned int N){ extern __shared__ int sdata[]; int* max_sdata = (int *) sdata; int* relMax_sdata = (int *) &sdata[blockDim.x]; //each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; //local index //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; //global index (1D grid - 1D block) unsigned int i = getGlobalIdx_2D_1D(); if(i < N){ //check if thread is in data bounds max_sdata[tid] = dev_accMat[i]; 
relMax_sdata[tid] = dev_accMat[i]; __syncthreads(); //do reduction in shared memory for(unsigned int s=1; s < blockDim.x; s*=2){ if(tid % (2*s) == 0){ //it is for a different stride //atomicMax(&(max_sdata[tid]),max_sdata[tid+s]); //TODO: change without atomic max_sdata[tid] = (max_sdata[tid] > max_sdata[tid+s]) ? max_sdata[tid] : max_sdata[tid+s]; __syncthreads(); } __syncthreads(); } //write results (now found in the first element of the array) for this block to global memory //if(tid == 0) dev_output[blockIdx.x] = sdata[0]; if(tid == 0) dev_output[blockIdx.x] = max_sdata[0]; //at sdata[0], we found the maximum if(relMax_sdata[tid] >= ac_soglia){ dev_maxRelOutput[i] = relMax_sdata[tid]; }else{ dev_maxRelOutput[i] = 0; } } } #endif void help(char* prog) { printf("Use %s [-l #loops] [-n #hitsToRead] [-h] \n\n", prog); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -n hits Number of hits to read from input file (Default: 236).\n"); printf(" -h This help.\n"); } int main(int argc, char* argv[]){ unsigned int N_LOOPS = 1; unsigned int N_HITS = 236; int c; //getting command line options while ( (c = getopt(argc, argv, "l:n:h")) != -1 ) { switch(c) { case 'n': N_HITS = atoi(optarg); break; case 'l': N_LOOPS = atoi(optarg); break; case 'h': help(argv[0]); return 0; break; default: printf("Unkown option!\n"); help(argv[0]); return 0; } } int GPU_N; checkCudaErrors(cudaGetDeviceCount(&GPU_N)); cudaDeviceProp *deviceProp; deviceProp = (cudaDeviceProp *) malloc(sizeof(cudaDeviceProp)*GPU_N); for(unsigned int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaGetDeviceProperties(&deviceProp[i], i)); cout << deviceProp[i].name << endl; } #ifndef CUDA_MANAGED_TRANSFER struct track_param *host_out_tracks; start_time(); #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaMallocHost((void **) &host_out_tracks, (sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)))); #else host_out_tracks = malloc(sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)); #endif 
float init_outputMatrix = stop_time("init output matrix with cudaMallocHost"); cout << "time to init output matrix (once): " << init_outputMatrix << endl; #endif int *dev_accMat; float *dev_x_values; float *dev_y_values; float *dev_z_values; float *x_values_temp; float *y_values_temp; float *z_values_temp; //executions loop for(unsigned int loop = 0; loop < N_LOOPS; loop++){ float timing[5]; //float R = 0.f; // Inizializzo a zero le matrici memset(&acc_Mat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); memset(&debug_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); //memset(&Max_rel, 0, (sizeof(int)*(Nsec*Ntheta*Nphi*Nrho)) ); //alloc accumulator matrix on GPU start_time(); checkCudaErrors(cudaMalloc((void **) &dev_accMat, (sizeof(int)* (Nsec * Ntheta * Nrho*Nphi)) )); checkCudaErrors(cudaMemset(dev_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)))); timing[1] = stop_time("malloc dev_accMat and memset(0)"); //riempi i valori dentro x_values , y_values , z_values read_inputFile("hits-5000.txt", N_HITS); // read_inputFile("../datafiles/hits-1.txt"); #ifdef CUDA_MANAGED_TRANSFER int cudaVer = 0; cudaRuntimeGetVersion(&cudaVer); if(cudaVer >= 6000){ start_time(); checkCudaErrors(cudaMallocManaged(&dev_x_values,sizeof(float)*x_values.size())); checkCudaErrors(cudaMallocManaged(&dev_y_values,sizeof(float)*y_values.size())); checkCudaErrors(cudaMallocManaged(&dev_z_values,sizeof(float)*z_values.size())); for(unsigned int i = 0; i < x_values.size(); i++){ dev_x_values[i] = x_values.at(i); dev_y_values[i] = y_values.at(i); dev_z_values[i] = z_values.at(i); } timing[0] = stop_time("Input malloc and copy HtoD"); }else{ #endif x_values_temp = (float*) malloc(sizeof(float)*x_values.size()); y_values_temp = (float*) malloc(sizeof(float)*y_values.size()); z_values_temp = (float*) malloc( sizeof(float)*z_values.size()); for(unsigned int i = 0; i < x_values.size(); i++){ x_values_temp[i] = x_values.at(i); y_values_temp[i] = y_values.at(i); z_values_temp[i] = z_values.at(i); } 
start_time(); checkCudaErrors(cudaMalloc((void **) &dev_x_values, sizeof(float)*x_values.size())); checkCudaErrors(cudaMalloc((void **) &dev_y_values, sizeof(float)*y_values.size())); checkCudaErrors(cudaMalloc((void **) &dev_z_values, sizeof(float)*z_values.size())); checkCudaErrors(cudaMemcpy(dev_x_values, x_values_temp, sizeof(float)*x_values.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_y_values, y_values_temp, sizeof(float)*y_values.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_z_values, z_values_temp, sizeof(float)*z_values.size(), cudaMemcpyHostToDevice)); timing[0] = stop_time("Input malloc and copy HtoD"); #ifdef CUDA_MANAGED_TRANSFER } #endif start_time(); voteHoughSpace <<<x_values.size(), Nphi>>> (dev_x_values, dev_y_values, dev_z_values, dev_accMat, dtheta, drho, dphi); //assumes that Nphi == Nrho timing[2] = stop_time("Vote"); #ifdef VERBOSE_DUMP checkCudaErrors(cudaMemcpy((void *) &debug_accMat, dev_accMat, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)), cudaMemcpyDeviceToHost)); #endif //CPU execution for(unsigned int i = 0; i < x_values.size(); i++){ //cout << x_values.at(i) << " - "; //cout << y_values.at(i) << endl; float R2=x_values.at(i)*x_values.at(i)+y_values.at(i)*y_values.at(i); float theta=acos(z_values.at(i)/sqrt(R2+z_values.at(i)*z_values.at(i))); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_values.at(i),x_values.at(i)); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor(sec/2/M_PI*Nsec); for(int iphi = 0; iphi < Nphi; iphi++){ float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_values.at(i)*cos(phi)+y_values.at(i)*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); if (rho<=rhomax && rho>rhomin) { acc_Mat[isec][ith][irho][iphi]++; } } } #ifdef VERBOSE_DUMP //check unsigned int corretto = 0; unsigned int errore = 0; unsigned int letto = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned 
int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[isec][ith][irho][iphi] != debug_accMat[isec][ith][irho][iphi]){ printf("diverso acc_Mat[%d][%d][%d][%d] %d - debug_accMat[%d][%d][%d][%d] %d \n", isec, ith, irho, iphi, acc_Mat[isec][ith][irho][iphi], isec, ith, irho, iphi, debug_accMat[isec][ith][irho][iphi]); errore++; }else corretto++; letto++; } } } } printf("corretti %d sbaglati %d; letti %d\n", corretto, errore, letto); /*for(unsigned int i = 0; i < Nsec; i++){ cout << "sec " << i << ":" << endl; for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[i][ith][iphi][irho] != 0) cout << "accMat[get3DIndex(" << ith << ", " << iphi << ", " << irho << ") = " << acc_Mat[i][ith][iphi][irho] << endl; } } } }*/ #endif checkCudaErrors(cudaFree(dev_x_values)); checkCudaErrors(cudaFree(dev_y_values)); checkCudaErrors(cudaFree(dev_z_values)); #ifndef CUDA_MANAGED_TRANSFER free(x_values_temp); free(y_values_temp); free(z_values_temp); #endif x_values.clear(); y_values.clear(); z_values.clear(); //trova il massimo relativo unsigned int host_NMrel = 0; // --- Prendiamo le informazioni specifiche della GPU per la divisione del lavoro appropriata unsigned int maxThreadsPerBlock = deviceProp[0].maxThreadsPerBlock; #ifndef PARALLEL_REDUX_MAX struct track_param *dev_indexOutput; Lock my_lock; unsigned int *NMrel; start_time(); #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ checkCudaErrors(cudaMallocManaged(&dev_indexOutput,(sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi)) )); checkCudaErrors(cudaMallocManaged(&NMrel,sizeof(unsigned int) )); *NMrel = 0; }else{ #endif checkCudaErrors(cudaMalloc((void **) &NMrel, (sizeof(unsigned int)))); checkCudaErrors(cudaMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(cudaMalloc((void **) &dev_indexOutput, (sizeof(struct 
track_param)* (Nsec * Ntheta * Nrho * Nphi )) )); #ifdef CUDA_MANAGED_TRANSFER } #endif checkCudaErrors(cudaMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); timing[1] += stop_time("malloc dev_indexOutput+NMrel and memset"); // dividiamo adeguatamente il lavoro // in base al numero massimo di thread disponibili in un singolo thread-block unsigned int dim_x_block = Nphi; unsigned int dim_y_block = maxThreadsPerBlock/dim_x_block; unsigned int dim_x_grid = Nsec; unsigned int dim_y_grid = Ntheta; unsigned int dim_z_grid = (Nrho/dim_y_block); dim3 grid(dim_x_grid, dim_y_grid, dim_z_grid); dim3 block(dim_x_block, dim_y_block); start_time(); findRelativeMax<<<grid, block>>>(dev_accMat, dev_indexOutput, NMrel); timing[3] = stop_time("Max. Relative"); size_t block_shMemsize = dim_x_block * dim_y_block * sizeof(int); //block_shMemsize *= OUT_VIEW_FRAME; //add more cells to each block shared-memory bank cout << "sh memsize " << block_shMemsize << endl; checkCudaErrors(cudaMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(cudaMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); findRelativeMax_withShared <<<grid, block, block_shMemsize>>> (dev_accMat, dev_indexOutput, NMrel); start_time(); #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaMemcpy((void *) host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), cudaMemcpyDeviceToHost)); #else checkCudaErrors(cudaMemcpy((void *) &host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), cudaMemcpyDeviceToHost)); #endif #endif #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ host_NMrel = *NMrel; }else{ #endif checkCudaErrors(cudaMemcpy((void *) &host_NMrel, NMrel, (sizeof(int)), cudaMemcpyDeviceToHost)); #ifdef CUDA_MANAGED_TRANSFER } #endif timing[4] = stop_time("Copy results DtoH"); #ifdef VERBOSE_DUMP cout << "NMrel from GPU "<< host_NMrel << endl; unsigned int 
ntracks = 0; /*for(unsigned int i = 0; ((i < (Nsec * Ntheta * Nphi * Nrho)) && (ntracks < host_NMrel)); i++){ #ifndef CUDA_MANAGED_TRANSFER if(host_out_tracks[i].acc > -1){ cout << "track " << ntracks << " host_out_tracks value = " << host_out_tracks[i].acc << " [" << i << "]" << endl; ntracks++; } #else if(dev_indexOutput[i].acc > -1){ cout << "track " << ntracks << " dev_indexOutput value = " << dev_indexOutput[i].acc << " [" << i << "]" << endl; ntracks++; } #endif }*/ #endif //free mem checkCudaErrors(cudaFree(dev_indexOutput)); checkCudaErrors(cudaFree(NMrel)); //print timing results with this format: // NHIT HtoD_input MEMSET_cumulative VOTE MAX_REL DtoH_output cout << N_HITS << " " << timing[0] << " " << timing[1] << " " << timing[2] << " " << timing[3] << " " << timing[4] << endl; #else #define SET_GRID_DIM(npoints, threadsPerBlock) ceil((npoints+((threadsPerBlock)-1))/(threadsPerBlock)) unsigned int half_grid = SET_GRID_DIM((Nsec*Ntheta*Nphi*Nrho), maxThreadsPerBlock)/2; dim3 grid(half_grid, 2); unsigned int n_blocks = half_grid * 2; int * dev_maxBlockOutput; checkCudaErrors(cudaMalloc((void **) &dev_maxBlockOutput, (sizeof(int) * n_blocks))); int * dev_maxRelOutput; checkCudaErrors(cudaMalloc((void **) &dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)))); reduceParallelMax<<<grid, maxThreadsPerBlock, 2*(maxThreadsPerBlock*sizeof(int))>>>(dev_accMat, dev_maxBlockOutput, dev_maxRelOutput, (Nsec*Ntheta*Nphi*Nrho)); int *host_maxBlockOutput = (int *) malloc((sizeof(int)* n_blocks)); checkCudaErrors(cudaMemcpy(host_maxBlockOutput, dev_maxBlockOutput, (sizeof(int) * n_blocks), cudaMemcpyDeviceToHost)); int *host_maxRelOutput = (int *) malloc((sizeof(int)* (Nsec*Ntheta*Nphi*Nrho))); checkCudaErrors(cudaMemcpy(host_maxRelOutput, dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)), cudaMemcpyDeviceToHost)); unsigned int debug = 0; for(unsigned int i = 0; i < n_blocks; i++){ if(host_maxBlockOutput[i] != 0){ cout << "block " << i << " max: " << 
host_maxBlockOutput[i] << " [" << i*maxThreadsPerBlock << "]" << endl; host_NMrel++; } unsigned int found = 0; for(unsigned int y = 0; y < maxThreadsPerBlock; y++){ unsigned int globalIndex = (y+(i*maxThreadsPerBlock)); if((host_maxRelOutput[globalIndex] != 0)) { cout << "out["<< globalIndex << "]="<< host_maxRelOutput[globalIndex]<< " "; found++; debug++; } } if(found > 0) cout << " (block "<< i << ")" << endl << endl; } /*for(unsigned int i = 0; i < (Nsec*Ntheta*Nphi*Nrho); i += maxThreadsPerBlock){ if(host_maxBlockOutput[i] != 0) cout << "block" << i/maxThreadsPerBlock << " max: " << host_maxBlockOutput[i] << " [" << i << "]" << endl; unsigned int found = 0; for(unsigned int y = 0; y < (maxThreadsPerBlock); y++){ // check relative maxima if((host_maxRelOutput[i+y] != 0)){ cout << "out["<< i+y << "]="<< host_maxRelOutput[i+y]<< " "; found++; host_NMrel++;} } if(found > 0) cout << endl << endl; }*/ cout << "NMrel from GPU "<< host_NMrel << " " << debug << endl; cudaFree(dev_maxBlockOutput); cudaFree(dev_maxRelOutput); free(host_maxBlockOutput); free(host_maxRelOutput); #endif host_NMrel = 0; int accumax = -1; int iphiMax = 0; int irhoMax = 0; int ithMax = 0; int isecMax = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 1; iphi < Nphi-1; iphi++){ for(unsigned int irho = 1; irho < Nrho-1; irho++){ float acc=acc_Mat[isec][ith][irho][iphi]; if (acc >= ac_soglia){ if (acc > accumax){ accumax=acc; } /*if (acc>acc_Mat[isec][ith-1][iphi][irho] && acc >= acc_Mat[isec][ith+1][iphi][irho]){ if (acc>acc_Mat[isec][ith][iphi-1][irho-1] && acc >= acc_Mat[isec][ith][iphi-1][irho+1]){ //TODO: chiedi a Lorenzo perché [iphi+1][irho+1] invece di [iphi-1][irho+1] if (acc>acc_Mat[isec][ith][iphi][irho-1] && acc >= acc_Mat[isec][ith][iphi][irho+1]){ if (acc>acc_Mat[isec][ith][iphi+1][irho-1] && acc >= acc_Mat[isec][ith][iphi+1][irho+1]){*/ if(acc > acc_Mat[isec][ith][irho-1][iphi] && acc >= 
acc_Mat[isec][ith][irho+1][iphi]){ if(acc > acc_Mat[isec][ith][irho][iphi-1] && acc >= acc_Mat[isec][ith][irho][iphi+1]){ //if (acc>=acc_Mat[isec][ith][irho][iphi+1] ){ accumax = acc_Mat[isec][ith][irho][iphi+1]; //Max_rel[isec][ith][irho][iphi+1]=1; host_NMrel++; ithMax=ith; irhoMax=irho; iphiMax=iphi; isecMax=isec+1; float t_th=(thetamin+ithMax*dtheta)*360.f/M_PI; float t_rho=rhomin+irhoMax*drho; float t_phi=phimin+iphiMax*dphi; //float q=t_rho/sin(t_phi); //float xm=-1/tan(t_phi); //cout << acc <<" "<< t_rho <<" "<< t_phi << " " << isecMax << endl; //} //} //} } } } } } } } #ifdef VERBOSE_DUMP cout << "NMrel from CPU "<< host_NMrel << endl; #endif checkCudaErrors(cudaFree(dev_accMat)); } #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaFreeHost(host_out_tracks)); #endif #endif return 0; } /***************************** * file opener *****************************/ void read_inputFile(string file_path, unsigned int num_hits) { ifstream input_f; string line; string value; stringstream ss; unsigned int val_iter; unsigned int line_read = 0; input_f.open(file_path.c_str()); if (input_f.is_open()) { while ( getline (input_f,line) && (line_read < num_hits) ) { val_iter = 0; ss.str(line); //prendiamo dati direttamente dal file ASCII in input while(ss >> value){ //i valori che ci interessano sono X, Y e Z if (val_iter == 0) x_values.push_back(atof(value.c_str())); else if (val_iter == 1) y_values.push_back(atof(value.c_str())); else if (val_iter == 2) z_values.push_back(atof(value.c_str())); val_iter++; } ss.clear(); line_read++; } input_f.close(); } }
31b3bbe65b8494ae73d723ba0da8fc0a531ad46b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include "common.h" #include "device_tensor.h" namespace { template<typename DType, typename Acctype> struct AggOp { __device__ AggOp(DeviceTensor<DType, 3> a, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : A(a), X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d])); } DeviceTensor<DType, 3> A; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct AggBackOp { __device__ AggBackOp(DeviceTensor<DType, 3> g, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : G(g), X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d])); } DeviceTensor<DType, 3> G; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct SL2Op { __device__ SL2Op(DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { DType r = X[b][i][d] - C[k][d]; return ScalarConvert<DType, Acctype>::to(r * r); } DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct SL2GradXOp { __device__ SL2GradXOp( DeviceTensor<DType, 3> gsl, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 1> s ) : GSL(gsl), X(x), C(c), S(s) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to( 2 * S[k] * GSL[b][i][k] * (X[b][i][d]-C[k][d])); } DeviceTensor<DType, 3> GSL; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 1> S; }; template<typename DType, typename Acctype> __global__ 
void Aggregate_Forward_kernel ( DeviceTensor<DType, 3> E, DeviceTensor<DType, 3> A, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C) { /* declarations of the variables */ int b, k, d, N; /* Get the index and channels */ b = blockIdx.z; d = blockIdx.x; k = blockIdx.y; N = X.getSize(1); /* main operation */ AggOp<DType, Acctype> g(A, X, C); E[b][k][d] = reduceN<Acctype>(g, b, k, d, N); } template<typename DType, typename Acctype> __global__ void Aggregate_Backward_kernel ( DeviceTensor<DType, 3> GA, DeviceTensor<DType, 3> GE, DeviceTensor<DType, 3> A, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; i = blockIdx.y; k = blockIdx.x; D = GE.getSize(2); /* main operation */ AggBackOp<DType, Acctype> g(GE, X, C); GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D); } template<typename DType, typename Acctype> __global__ void ScaledL2_Forward_kernel ( DeviceTensor<DType, 3> SL, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 1> S) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; k = blockIdx.x; i = blockIdx.y; D = X.getSize(2); /* main operation */ SL2Op<DType, Acctype> g(X,C); SL[b][i][k] = S[k] * reduceD<Acctype>(g,b,i,k,D);; } template<typename DType, typename Acctype> __global__ void ScaledL2_GradX_kernel ( DeviceTensor<DType, 3> GSL, DeviceTensor<DType, 3> GX, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 1> S) { /* declarations of the variables */ int b, d, i, K; /* Get the index and channels */ b = blockIdx.z; d = blockIdx.x; i = blockIdx.y; K = C.getSize(0); /* main operation */ SL2GradXOp<DType, Acctype> g(GSL,X,C,S); GX[b][i][d] = reduceK<Acctype>(g,b,i,d,K); } template<typename DType, typename Acctype> __global__ void ScaledL2_GradC_kernel ( DeviceTensor<DType, 3> GSL, DeviceTensor<DType, 2> GC, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, 
DeviceTensor<DType, 1> S) { /* declarations of the variables */ int k, d, B, N; /* Get the index and channels */ d = blockIdx.x; k = blockIdx.y; B = X.getSize(0); N = X.getSize(1); /* main operation */ SL2GradXOp<DType, Acctype> g(GSL,X,C,S); GC[k][d] = - reduceBN<Acctype>(g, k, d, B, N); } }// namespace at::Tensor Aggregate_Forward_CUDA( const at::Tensor A_, const at::Tensor X_, const at::Tensor C_) { /* Device tensors */ auto E_ = torch::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // B, K, D dim3 blocks(C_.size(1), C_.size(0), X_.size(0)); dim3 threads(getNumThreads(X_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] { DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); /* kernel function */ hipLaunchKernelGGL(( Aggregate_Forward_kernel<scalar_t, scalar_t>) , dim3(blocks), dim3(threads), 0, stream, E, A, X, C); })); AT_ASSERT(hipGetLastError() == hipSuccess); return E_; } std::vector<at::Tensor> Aggregate_Backward_CUDA( const at::Tensor GE_, const at::Tensor A_, const at::Tensor X_, const at::Tensor C_) { auto gradA_ = at::zeros_like(A_); auto gradX_ = at::bmm(A_, GE_); auto gradC_ = (-GE_ * A_.sum(1).unsqueeze(2)).sum(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // B, K, D dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_); DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = 
devicetensor<scalar_t, 2>(C_); hipLaunchKernelGGL(( Aggregate_Backward_kernel<scalar_t, scalar_t>) , dim3(blocks), dim3(threads), 0, stream, GA, GE, A, X, C); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {gradA_, gradX_, gradC_}; } at::Tensor ScaledL2_Forward_CUDA( const at::Tensor X_, const at::Tensor C_, const at::Tensor S_) { auto SL_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> SL = devicetensor<scalar_t, 3>(SL_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_); /* kernel function */ hipLaunchKernelGGL(( ScaledL2_Forward_kernel<scalar_t, scalar_t>) , dim3(blocks), dim3(threads), 0, stream, SL, X, C, S); })); AT_ASSERT(hipGetLastError() == hipSuccess); return SL_; } std::vector<at::Tensor> ScaledL2_Backward_CUDA( const at::Tensor GSL_, const at::Tensor X_, const at::Tensor C_, const at::Tensor S_, const at::Tensor SL_) { auto GX_ = at::zeros_like(X_); auto GC_ = at::zeros_like(C_); /* kernel function */ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks1(X_.size(2), X_.size(1), X_.size(0)); dim3 threads1(getNumThreads(C_.size(0))); dim3 blocks2(C_.size(1), C_.size(0)); dim3 threads2(getNumThreads(X_.size(1))); auto GS_ = (GSL_ * (SL_ / S_.view({1, 1, C_.size(0)}))).sum(0).sum(0); AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GSL = devicetensor<scalar_t, 3>(GSL_); DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_); DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_); DeviceTensor<scalar_t, 3> X = 
devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_); hipLaunchKernelGGL(( ScaledL2_GradX_kernel<scalar_t, scalar_t>) , dim3(blocks1), dim3(threads1), 0, stream, GSL, GX, X, C, S); AT_ASSERT(hipGetLastError() == hipSuccess); hipLaunchKernelGGL(( ScaledL2_GradC_kernel<scalar_t, scalar_t>) , dim3(blocks2), dim3(threads2), 0, stream, GSL, GC, X, C, S); AT_ASSERT(hipGetLastError() == hipSuccess); })); return {GX_, GC_, GS_}; }
31b3bbe65b8494ae73d723ba0da8fc0a531ad46b.cu
#include <vector> #include <torch/extension.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include "common.h" #include "device_tensor.h" namespace { template<typename DType, typename Acctype> struct AggOp { __device__ AggOp(DeviceTensor<DType, 3> a, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : A(a), X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d])); } DeviceTensor<DType, 3> A; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct AggBackOp { __device__ AggBackOp(DeviceTensor<DType, 3> g, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : G(g), X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d])); } DeviceTensor<DType, 3> G; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct SL2Op { __device__ SL2Op(DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c) : X(x), C(c) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { DType r = X[b][i][d] - C[k][d]; return ScalarConvert<DType, Acctype>::to(r * r); } DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; }; template<typename DType, typename Acctype> struct SL2GradXOp { __device__ SL2GradXOp( DeviceTensor<DType, 3> gsl, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 1> s ) : GSL(gsl), X(x), C(c), S(s) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to( 2 * S[k] * GSL[b][i][k] * (X[b][i][d]-C[k][d])); } DeviceTensor<DType, 3> GSL; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 1> S; }; template<typename DType, typename Acctype> __global__ void Aggregate_Forward_kernel ( DeviceTensor<DType, 3> E, DeviceTensor<DType, 3> A, 
DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C) { /* declarations of the variables */ int b, k, d, N; /* Get the index and channels */ b = blockIdx.z; d = blockIdx.x; k = blockIdx.y; N = X.getSize(1); /* main operation */ AggOp<DType, Acctype> g(A, X, C); E[b][k][d] = reduceN<Acctype>(g, b, k, d, N); } template<typename DType, typename Acctype> __global__ void Aggregate_Backward_kernel ( DeviceTensor<DType, 3> GA, DeviceTensor<DType, 3> GE, DeviceTensor<DType, 3> A, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; i = blockIdx.y; k = blockIdx.x; D = GE.getSize(2); /* main operation */ AggBackOp<DType, Acctype> g(GE, X, C); GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D); } template<typename DType, typename Acctype> __global__ void ScaledL2_Forward_kernel ( DeviceTensor<DType, 3> SL, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 1> S) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; k = blockIdx.x; i = blockIdx.y; D = X.getSize(2); /* main operation */ SL2Op<DType, Acctype> g(X,C); SL[b][i][k] = S[k] * reduceD<Acctype>(g,b,i,k,D);; } template<typename DType, typename Acctype> __global__ void ScaledL2_GradX_kernel ( DeviceTensor<DType, 3> GSL, DeviceTensor<DType, 3> GX, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 1> S) { /* declarations of the variables */ int b, d, i, K; /* Get the index and channels */ b = blockIdx.z; d = blockIdx.x; i = blockIdx.y; K = C.getSize(0); /* main operation */ SL2GradXOp<DType, Acctype> g(GSL,X,C,S); GX[b][i][d] = reduceK<Acctype>(g,b,i,d,K); } template<typename DType, typename Acctype> __global__ void ScaledL2_GradC_kernel ( DeviceTensor<DType, 3> GSL, DeviceTensor<DType, 2> GC, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 1> S) { /* declarations of the variables */ int k, d, B, N; /* Get the index 
and channels */ d = blockIdx.x; k = blockIdx.y; B = X.getSize(0); N = X.getSize(1); /* main operation */ SL2GradXOp<DType, Acctype> g(GSL,X,C,S); GC[k][d] = - reduceBN<Acctype>(g, k, d, B, N); } }// namespace at::Tensor Aggregate_Forward_CUDA( const at::Tensor A_, const at::Tensor X_, const at::Tensor C_) { /* Device tensors */ auto E_ = torch::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // B, K, D dim3 blocks(C_.size(1), C_.size(0), X_.size(0)); dim3 threads(getNumThreads(X_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] { DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); /* kernel function */ Aggregate_Forward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>>(E, A, X, C); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return E_; } std::vector<at::Tensor> Aggregate_Backward_CUDA( const at::Tensor GE_, const at::Tensor A_, const at::Tensor X_, const at::Tensor C_) { auto gradA_ = at::zeros_like(A_); auto gradX_ = at::bmm(A_, GE_); auto gradC_ = (-GE_ * A_.sum(1).unsqueeze(2)).sum(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // B, K, D dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_); DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); Aggregate_Backward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>> (GA, GE, A, X, C); })); 
AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradA_, gradX_, gradC_}; } at::Tensor ScaledL2_Forward_CUDA( const at::Tensor X_, const at::Tensor C_, const at::Tensor S_) { auto SL_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> SL = devicetensor<scalar_t, 3>(SL_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_); /* kernel function */ ScaledL2_Forward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>> (SL, X, C, S); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return SL_; } std::vector<at::Tensor> ScaledL2_Backward_CUDA( const at::Tensor GSL_, const at::Tensor X_, const at::Tensor C_, const at::Tensor S_, const at::Tensor SL_) { auto GX_ = at::zeros_like(X_); auto GC_ = at::zeros_like(C_); /* kernel function */ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks1(X_.size(2), X_.size(1), X_.size(0)); dim3 threads1(getNumThreads(C_.size(0))); dim3 blocks2(C_.size(1), C_.size(0)); dim3 threads2(getNumThreads(X_.size(1))); auto GS_ = (GSL_ * (SL_ / S_.view({1, 1, C_.size(0)}))).sum(0).sum(0); AT_DISPATCH_FLOATING_TYPES(X_.type(), "ScaledL2_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GSL = devicetensor<scalar_t, 3>(GSL_); DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_); DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 1> S = devicetensor<scalar_t, 1>(S_); ScaledL2_GradX_kernel<scalar_t, scalar_t> <<<blocks1, threads1, 0, 
stream>>> (GSL, GX, X, C, S); AT_ASSERT(cudaGetLastError() == cudaSuccess); ScaledL2_GradC_kernel<scalar_t, scalar_t> <<<blocks2, threads2, 0, stream>>> (GSL, GC, X, C, S); AT_ASSERT(cudaGetLastError() == cudaSuccess); })); return {GX_, GC_, GS_}; }
c6c2f408566873a9837ff26ba39edf4aac79d2d8.hip
// !!! This is a file automatically generated by hipify!!! // mining.cu /******************************************************************************* MINING -- Autolykos parallel BlockMining procedure *******************************************************************************/ #include "../include/mining.h" #include <hip/hip_runtime.h> __device__ __forceinline__ uint32_t ld_gbl_cs(const uint32_t *p) { uint32_t v; asm("ld.global.cs.u32 %0, [%1];" : "=r"(v) : "l"(p)); return v; } __device__ __forceinline__ uint4 ld_gbl_cs_v4(const uint4* p) { uint4 v; asm("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];" : "=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "l"(p)); return v; } __device__ __forceinline__ uint32_t cuda_swab32(uint32_t x) { /* device */ return __byte_perm(x, x, 0x0123); } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devROTR64(uint64_t b, int offset) { uint2 a; uint2 result; a = vectorize(b); if (offset < 32) { asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return devectorize(result); } __device__ __forceinline__ uint2 __byte_perm_64(const uint2 source, const uint32_t grab1, const uint32_t grab2) { uint2 r; asm("prmt.b32 %0, %1, %2, %3;" : "=r"(r.x) : "r"(source.x), "r"(source.y), "r"(grab1)); asm("prmt.b32 %0, %1, %2, %3;" : "=r"(r.y) : "r"(source.x), "r"(source.y), "r"(grab2)); return r; } __device__ __forceinline__ uint2 
__swap_hilo(const uint2 source) { uint2 r; r.x = source.y; r.y = source.x; return r; } __device__ __forceinline__ void devB2B_G(uint64_t* v, int a, int b, int c, int d, uint64_t x, uint64_t y) { ((uint64_t *)(v))[a] += ((uint64_t *)(v))[b] + x; ((uint64_t *)(v))[d] = devROTR64(((uint64_t *)(v))[d] ^ ((uint64_t *)(v))[a], 32); ((uint64_t *)(v))[c] += ((uint64_t *)(v))[d]; ((uint64_t *)(v))[b] = devROTR64(((uint64_t *)(v))[b] ^ ((uint64_t *)(v))[c], 24); ((uint64_t *)(v))[a] += ((uint64_t *)(v))[b] + y; ((uint64_t *)(v))[d] = devROTR64(((uint64_t *)(v))[d] ^ ((uint64_t *)(v))[a], 16); ((uint64_t *)(v))[c] += ((uint64_t *)(v))[d]; ((uint64_t *)(v))[b] = devROTR64(((uint64_t *)(v))[b] ^ ((uint64_t *)(v))[c], 63); } __device__ __forceinline__ void devB2B_MIX(uint64_t* v, uint64_t* m) \ { devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 5]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 7]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[10], ((uint64_t *)(m))[11]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[12], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[14], ((uint64_t *)(m))[10]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 8]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[15]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 6]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 2]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 7]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 3]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t 
*)(m))[11], ((uint64_t *)(m))[ 8]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 0]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 2]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[15], ((uint64_t *)(m))[13]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[10], ((uint64_t *)(m))[14]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 6]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 1]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 4]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 1]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[13], ((uint64_t *)(m))[12]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[11], ((uint64_t *)(m))[14]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 6]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[10]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 0]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[15], ((uint64_t *)(m))[ 8]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 0]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 7]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 4]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[10], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[14], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[11], ((uint64_t *)(m))[12]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 8]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[13]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[10]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[11]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 3]); 
devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[13]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 5]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[15], ((uint64_t *)(m))[14]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 9]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 5]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[15]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[10]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 7]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 2]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[11]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[13], ((uint64_t *)(m))[11]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[14]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 1]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 9]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 0]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[15], ((uint64_t *)(m))[ 4]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 6]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[10]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[15]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[14], ((uint64_t *)(m))[ 9]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 3]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 8]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 2]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 7]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 4]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t 
*)(m))[10], ((uint64_t *)(m))[ 5]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[10], ((uint64_t *)(m))[ 2]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 4]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 6]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 5]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[15], ((uint64_t *)(m))[11]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[14]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[12]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 0]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 5]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 7]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[10], ((uint64_t *)(m))[11]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[12], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[14], ((uint64_t *)(m))[10]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 8]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[15]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 6]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 2]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 7]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 3]); } __device__ __forceinline__ void devDEVICE_B2B_H_LAST(ctx_t *ctx, uint64_t* aux) \ { asm volatile ( "add.cc.u32 %0, %0, %1;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[0]): "r"(((ctx_t *)(ctx))->c) ); asm volatile ( 
"addc.cc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[1]) ); asm volatile ( "addc.cc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[2]) ); asm volatile ( "addc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[3]) ); while (((ctx_t *)(ctx))->c < BUF_SIZE_8) { ((ctx_t *)(ctx))->b[((ctx_t *)(ctx))->c++] = 0; } ((uint64_t *)(aux))[0] = ((ctx_t *)(ctx))->h[0]; ((uint64_t *)(aux))[1] = ((ctx_t *)(ctx))->h[1]; ((uint64_t *)(aux))[2] = ((ctx_t *)(ctx))->h[2]; ((uint64_t *)(aux))[3] = ((ctx_t *)(ctx))->h[3]; ((uint64_t *)(aux))[4] = ((ctx_t *)(ctx))->h[4]; ((uint64_t *)(aux))[5] = ((ctx_t *)(ctx))->h[5]; ((uint64_t *)(aux))[6] = ((ctx_t *)(ctx))->h[6]; ((uint64_t *)(aux))[7] = ((ctx_t *)(ctx))->h[7]; B2B_IV(aux + 8); ((uint64_t *)(aux))[12] ^= ((ctx_t *)(ctx))->t[0]; ((uint64_t *)(aux))[13] ^= ((ctx_t *)(ctx))->t[1]; ((uint64_t *)(aux))[14] = ~((uint64_t *)(aux))[14]; ((uint64_t *)(aux))[16] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 0]; ((uint64_t *)(aux))[17] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 1]; ((uint64_t *)(aux))[18] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 2]; ((uint64_t *)(aux))[19] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 3]; ((uint64_t *)(aux))[20] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 4]; ((uint64_t *)(aux))[21] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 5]; ((uint64_t *)(aux))[22] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 6]; ((uint64_t *)(aux))[23] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 7]; ((uint64_t *)(aux))[24] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 8]; ((uint64_t *)(aux))[25] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 9]; ((uint64_t *)(aux))[26] = ((uint64_t *)(((ctx_t *)(ctx))->b))[10]; ((uint64_t *)(aux))[27] = ((uint64_t *)(((ctx_t *)(ctx))->b))[11]; ((uint64_t *)(aux))[28] = ((uint64_t *)(((ctx_t *)(ctx))->b))[12]; ((uint64_t *)(aux))[29] = ((uint64_t *)(((ctx_t *)(ctx))->b))[13]; ((uint64_t *)(aux))[30] = ((uint64_t *)(((ctx_t *)(ctx))->b))[14]; ((uint64_t *)(aux))[31] = ((uint64_t *)(((ctx_t *)(ctx))->b))[15]; devB2B_MIX(aux, aux + 16); 
((ctx_t *)(ctx))->h[0] ^= ((uint64_t *)(aux))[0] ^ ((uint64_t *)(aux))[ 8]; ((ctx_t *)(ctx))->h[1] ^= ((uint64_t *)(aux))[1] ^ ((uint64_t *)(aux))[ 9]; ((ctx_t *)(ctx))->h[2] ^= ((uint64_t *)(aux))[2] ^ ((uint64_t *)(aux))[10]; ((ctx_t *)(ctx))->h[3] ^= ((uint64_t *)(aux))[3] ^ ((uint64_t *)(aux))[11]; ((ctx_t *)(ctx))->h[4] ^= ((uint64_t *)(aux))[4] ^ ((uint64_t *)(aux))[12]; ((ctx_t *)(ctx))->h[5] ^= ((uint64_t *)(aux))[5] ^ ((uint64_t *)(aux))[13]; ((ctx_t *)(ctx))->h[6] ^= ((uint64_t *)(aux))[6] ^ ((uint64_t *)(aux))[14]; ((ctx_t *)(ctx))->h[7] ^= ((uint64_t *)(aux))[7] ^ ((uint64_t *)(aux))[15]; return; } //////////////////////////////////////////////////////////////////////////////// // Unfinalized hash of message //////////////////////////////////////////////////////////////////////////////// void InitMining( // context ctx_t * ctx, // message const uint32_t * mes, // message length in bytes const uint32_t meslen ) { //========================================================================// // Initialize context //========================================================================// memset(ctx->b, 0, BUF_SIZE_8); B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; memset(ctx->t, 0, 16); ctx->c = 0; //========================================================================// // Hash message //========================================================================// for (uint_t j = 0; j < meslen; ++j) { //if (ctx->c == BUF_SIZE_8) { HOST_B2B_H(ctx, aux); } ctx->b[ctx->c++] = ((const uint8_t *)mes)[j]; } return; } //////////////////////////////////////////////////////////////////////////////// // Block mining //////////////////////////////////////////////////////////////////////////////// __global__ __launch_bounds__(64, 64) __global__ void BlockMining( // boundary for puzzle const uint32_t * bound, // data: mes ctx const uint32_t * data, // nonce base const uint64_t base, // block height const uint32_t height, // precalculated hashes const uint32_t * 
hashes, // indices of valid solutions uint32_t * valid , uint32_t * count ) { uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; uint32_t const thread_id = threadIdx.x & 7; uint32_t const thrdblck_id = threadIdx.x; uint32_t const hash_id = threadIdx.x >> 3; uint64_t aux[32] = {0}; uint32_t ind[32] = {0}; uint32_t r[9] = {0}; uint4 v1 = {0,0,0,0}; uint4 v2 = {0,0,0,0}; uint4 v3 = {0,0,0,0}; uint4 v4 = {0,0,0,0}; ctx_t sdata; ctx_t *ctx = ((ctx_t * )(&sdata)); __shared__ uint32_t shared_index[64]; __shared__ uint32_t shared_data[512]; if (tid < NONCES_PER_ITER) { uint32_t j; uint32_t non[2]; asm volatile ( "add.cc.u32 %0, %1, %2;": "=r"(non[0]): "r"(((uint32_t *)&base)[0]), "r"(tid) ); asm volatile ( "addc.u32 %0, %1, 0;": "=r"(non[1]): "r"(((uint32_t *)&base)[1]) ); //================================================================// // Hash nonce //================================================================// for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = (( const uint8_t *)data)[j]; } ctx->b[ctx->c++] = ((uint8_t *)non)[7]; ctx->b[ctx->c++] = ((uint8_t *)non)[6]; ctx->b[ctx->c++] = ((uint8_t *)non)[5]; ctx->b[ctx->c++] = ((uint8_t *)non)[4]; ctx->b[ctx->c++] = ((uint8_t *)non)[3]; ctx->b[ctx->c++] = ((uint8_t *)non)[2]; ctx->b[ctx->c++] = ((uint8_t *)non)[1]; ctx->b[ctx->c++] = ((uint8_t *)non)[0]; //================================================================// // Finalize hashes //================================================================// devDEVICE_B2B_H_LAST(ctx, aux); //---------------------------------------------------------------------------------------------------------------------// //Begin lookup * * * * * * * * * * * * * * * * * * * * * #pragma unroll 32 for (j = 0; j < NUM_SIZE_8; ++j) { ((uint8_t *)r)[j] = 
(ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } uint64_t h2; ((uint8_t*)&h2)[0] = ((uint8_t*)r)[31]; ((uint8_t*)&h2)[1] = ((uint8_t*)r)[30]; ((uint8_t*)&h2)[2] = ((uint8_t*)r)[29]; ((uint8_t*)&h2)[3] = ((uint8_t*)r)[28]; ((uint8_t*)&h2)[4] = ((uint8_t*)r)[27]; ((uint8_t*)&h2)[5] = ((uint8_t*)r)[26]; ((uint8_t*)&h2)[6] = ((uint8_t*)r)[25]; ((uint8_t*)&h2)[7] = ((uint8_t*)r)[24]; uint32_t h3 = h2 % N_LEN; #pragma unroll 8 for (int i = 0; i < 8; ++i) { r[7-i] = cuda_swab32(hashes[(h3 << 3) + i]); } /*if (tid == 0) { printf("\n"); for (int j = 0; j < 8; j++) { printf("%08x", r[j]); } printf("\n"); }*/ //====================================================================// // Initialize context //====================================================================// #pragma unroll 8 for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; //====================================================================// // Hash //====================================================================// #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8 - 1; ++j) { ctx->b[ctx->c++] = ((const uint8_t *)r)[j + 1]; } //====================================================================// // Hash message //====================================================================// #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = (( const uint8_t *)data)[j]; } //================================================================// // Hash nonce //================================================================// ctx->b[ctx->c++] = ((uint8_t *)non)[7]; ctx->b[ctx->c++] = ((uint8_t *)non)[6]; ctx->b[ctx->c++] = ((uint8_t *)non)[5]; ctx->b[ctx->c++] = ((uint8_t *)non)[4]; ctx->b[ctx->c++] = ((uint8_t *)non)[3]; ctx->b[ctx->c++] = ((uint8_t *)non)[2]; ctx->b[ctx->c++] = ((uint8_t *)non)[1]; ctx->b[ctx->c++] = 
((uint8_t *)non)[0]; //---------------------------------------------------------------------------------------------------------------------// //================================================================// // Finalize hashes //================================================================// devDEVICE_B2B_H_LAST(ctx, aux); #pragma unroll 32 for (j = 0; j < 32; ++j) { ((uint8_t *)r)[(j & 0xFFFFFFFC) + (3 - (j & 3))] = (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } //================================================================// // Generate indices //================================================================// ((uint8_t *)r)[33] = ((uint8_t *)r)[1]; ((uint8_t *)r)[34] = ((uint8_t *)r)[2]; ((uint8_t *)r)[35] = ((uint8_t *)r)[3]; #pragma unroll for (int k = 0; k < K_LEN; k += 4) { ind[k] = r[k >> 2] & N_MASK; ind[k + 1] = ((r[k >> 2] << 8) | (r[(k >> 2) + 1] >> 24)) & N_MASK; ind[k + 2] = ((r[k >> 2] << 16) | (r[(k >> 2) + 1] >> 16)) & N_MASK; ind[k + 3] = ((r[k >> 2] << 24) | (r[(k >> 2) + 1] >> 8)) & N_MASK; } //---------------------------------------------------------------------------------------------------------------------// //================================================================// // Calculate result //================================================================// shared_index[thrdblck_id] = ind[0]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = 
(hashes[(shared_index[hash_id+48]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v1.x = shared_data[(thrdblck_id<<3) + 0]; v1.y = shared_data[(thrdblck_id<<3) + 1]; v1.z = shared_data[(thrdblck_id<<3) + 2]; v1.w = shared_data[(thrdblck_id<<3) + 3]; v3.x = shared_data[(thrdblck_id<<3) + 4]; v3.y = shared_data[(thrdblck_id<<3) + 5]; v3.z = shared_data[(thrdblck_id<<3) + 6]; v3.w = shared_data[(thrdblck_id<<3) + 7]; shared_index[thrdblck_id] = ind[1]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = (hashes[(shared_index[hash_id+48]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v2.x = shared_data[(thrdblck_id<<3) + 0]; v2.y = shared_data[(thrdblck_id<<3) + 1]; v2.z = shared_data[(thrdblck_id<<3) + 2]; v2.w = shared_data[(thrdblck_id<<3) + 3]; v4.x = shared_data[(thrdblck_id<<3) + 4]; v4.y = shared_data[(thrdblck_id<<3) + 5]; v4.z = shared_data[(thrdblck_id<<3) + 6]; v4.w = shared_data[(thrdblck_id<<3) + 7]; asm volatile ("add.cc.u32 %0, %1, %2;":"=r"(r[0]):"r"(v1.x),"r"(v2.x)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[1]):"r"(v1.y),"r"(v2.y)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[2]):"r"(v1.z),"r"(v2.z)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[3]):"r"(v1.w),"r"(v2.w)); asm volatile ("addc.cc.u32 %0, %1, 
%2;":"=r"(r[4]):"r"(v3.x),"r"(v4.x)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[5]):"r"(v3.y),"r"(v4.y)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[6]):"r"(v3.z),"r"(v4.z)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[7]):"r"(v3.w),"r"(v4.w)); asm volatile ("addc.u32 %0, 0, 0;": "=r"(r[8])); ////////////////////////////////////////////////////////////////////////////////////////////////////////// // remaining additions #pragma unroll for (int k = 2; k < K_LEN; ++k) { shared_index[thrdblck_id] = ind[k]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = (hashes[(shared_index[hash_id+48]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v1.x = shared_data[(thrdblck_id<<3) + 0]; v1.y = shared_data[(thrdblck_id<<3) + 1]; v1.z = shared_data[(thrdblck_id<<3) + 2]; v1.w = shared_data[(thrdblck_id<<3) + 3]; v2.x = shared_data[(thrdblck_id<<3) + 4]; v2.y = shared_data[(thrdblck_id<<3) + 5]; v2.z = shared_data[(thrdblck_id<<3) + 6]; v2.w = shared_data[(thrdblck_id<<3) + 7]; asm volatile ("add.cc.u32 %0, %0, %1;":"+r"(r[0]): "r"(v1.x)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[1]): "r"(v1.y)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[2]): "r"(v1.z)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[3]): "r"(v1.w)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[4]): "r"(v2.x)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[5]): 
"r"(v2.y)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[6]): "r"(v2.z)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[7]): "r"(v2.w)); asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[8])); } ////////////////////////////////////////////////////////////////////////////////////////////////////////// //--------------------hash(f)-------------------- //====================================================================// // Initialize context //====================================================================// //memset(ctx->b, 0, BUF_SIZE_8); for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; //--------------hash-------------------- for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = ((const uint8_t *)r)[NUM_SIZE_8 - j - 1]; } //====================================================================// // Finalize hash //====================================================================// devDEVICE_B2B_H_LAST(ctx, aux); for (j = 0; j < NUM_SIZE_8; ++j) { ((uint8_t*)r)[NUM_SIZE_8 - j - 1] = (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } //================================================================// // Dump result to global memory -- LITTLE ENDIAN //================================================================// j = ((uint64_t *)r)[3] < ((uint64_t *)bound)[3] || ((uint64_t *)r)[3] == ((uint64_t *)bound)[3] && ( ((uint64_t *)r)[2] < ((uint64_t *)bound)[2] || ((uint64_t *)r)[2] == ((uint64_t *)bound)[2] && ( ((uint64_t *)r)[1] < ((uint64_t *)bound)[1] || ((uint64_t *)r)[1] == ((uint64_t *)bound)[1] && ((uint64_t *)r)[0] < ((uint64_t *)bound)[0] ) ); if(j ) { uint32_t id = atomicInc(count, MAX_SOLS); valid[id] = tid+1; } } return; } // mining.cu
c6c2f408566873a9837ff26ba39edf4aac79d2d8.cu
// mining.cu /******************************************************************************* MINING -- Autolykos parallel BlockMining procedure *******************************************************************************/ #include "../include/mining.h" #include <cuda.h> __device__ __forceinline__ uint32_t ld_gbl_cs(const uint32_t *p) { uint32_t v; asm("ld.global.cs.u32 %0, [%1];" : "=r"(v) : "l"(p)); return v; } __device__ __forceinline__ uint4 ld_gbl_cs_v4(const uint4* p) { uint4 v; asm("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];" : "=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "l"(p)); return v; } __device__ __forceinline__ uint32_t cuda_swab32(uint32_t x) { /* device */ return __byte_perm(x, x, 0x0123); } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devROTR64(uint64_t b, int offset) { uint2 a; uint2 result; a = vectorize(b); if (offset < 32) { asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return devectorize(result); } __device__ __forceinline__ uint2 __byte_perm_64(const uint2 source, const uint32_t grab1, const uint32_t grab2) { uint2 r; asm("prmt.b32 %0, %1, %2, %3;" : "=r"(r.x) : "r"(source.x), "r"(source.y), "r"(grab1)); asm("prmt.b32 %0, %1, %2, %3;" : "=r"(r.y) : "r"(source.x), "r"(source.y), "r"(grab2)); return r; } __device__ __forceinline__ uint2 __swap_hilo(const uint2 source) { uint2 r; r.x = source.y; r.y = 
source.x; return r; } __device__ __forceinline__ void devB2B_G(uint64_t* v, int a, int b, int c, int d, uint64_t x, uint64_t y) { ((uint64_t *)(v))[a] += ((uint64_t *)(v))[b] + x; ((uint64_t *)(v))[d] = devROTR64(((uint64_t *)(v))[d] ^ ((uint64_t *)(v))[a], 32); ((uint64_t *)(v))[c] += ((uint64_t *)(v))[d]; ((uint64_t *)(v))[b] = devROTR64(((uint64_t *)(v))[b] ^ ((uint64_t *)(v))[c], 24); ((uint64_t *)(v))[a] += ((uint64_t *)(v))[b] + y; ((uint64_t *)(v))[d] = devROTR64(((uint64_t *)(v))[d] ^ ((uint64_t *)(v))[a], 16); ((uint64_t *)(v))[c] += ((uint64_t *)(v))[d]; ((uint64_t *)(v))[b] = devROTR64(((uint64_t *)(v))[b] ^ ((uint64_t *)(v))[c], 63); } __device__ __forceinline__ void devB2B_MIX(uint64_t* v, uint64_t* m) \ { devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 5]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 7]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[10], ((uint64_t *)(m))[11]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[12], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[14], ((uint64_t *)(m))[10]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 8]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[15]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 6]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 2]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 7]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 3]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 8]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t 
*)(m))[12], ((uint64_t *)(m))[ 0]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 2]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[15], ((uint64_t *)(m))[13]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[10], ((uint64_t *)(m))[14]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 6]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 1]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 4]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 1]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[13], ((uint64_t *)(m))[12]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[11], ((uint64_t *)(m))[14]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 6]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[10]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 0]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[15], ((uint64_t *)(m))[ 8]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 0]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 7]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 4]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[10], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[14], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[11], ((uint64_t *)(m))[12]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 8]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[13]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[10]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[11]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 3]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[13]); 
devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 5]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[15], ((uint64_t *)(m))[14]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 9]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 5]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[15]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[10]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 7]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[ 2]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[11]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[13], ((uint64_t *)(m))[11]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[14]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 1]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[ 9]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 0]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[15], ((uint64_t *)(m))[ 4]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 6]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[10]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[15]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[14], ((uint64_t *)(m))[ 9]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 3]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 8]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[12], ((uint64_t *)(m))[ 2]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 7]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 4]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[10], ((uint64_t *)(m))[ 5]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t 
*)(m))[10], ((uint64_t *)(m))[ 2]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 4]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 7], ((uint64_t *)(m))[ 6]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[ 5]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[15], ((uint64_t *)(m))[11]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[14]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[ 3], ((uint64_t *)(m))[12]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 0]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 1]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 2], ((uint64_t *)(m))[ 3]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 5]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[ 6], ((uint64_t *)(m))[ 7]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 8], ((uint64_t *)(m))[ 9]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[10], ((uint64_t *)(m))[11]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[12], ((uint64_t *)(m))[13]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[14], ((uint64_t *)(m))[15]); devB2B_G(v, 0, 4, 8, 12, ((uint64_t *)(m))[14], ((uint64_t *)(m))[10]); devB2B_G(v, 1, 5, 9, 13, ((uint64_t *)(m))[ 4], ((uint64_t *)(m))[ 8]); devB2B_G(v, 2, 6, 10, 14, ((uint64_t *)(m))[ 9], ((uint64_t *)(m))[15]); devB2B_G(v, 3, 7, 11, 15, ((uint64_t *)(m))[13], ((uint64_t *)(m))[ 6]); devB2B_G(v, 0, 5, 10, 15, ((uint64_t *)(m))[ 1], ((uint64_t *)(m))[12]); devB2B_G(v, 1, 6, 11, 12, ((uint64_t *)(m))[ 0], ((uint64_t *)(m))[ 2]); devB2B_G(v, 2, 7, 8, 13, ((uint64_t *)(m))[11], ((uint64_t *)(m))[ 7]); devB2B_G(v, 3, 4, 9, 14, ((uint64_t *)(m))[ 5], ((uint64_t *)(m))[ 3]); } __device__ __forceinline__ void devDEVICE_B2B_H_LAST(ctx_t *ctx, uint64_t* aux) \ { asm volatile ( "add.cc.u32 %0, %0, %1;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[0]): "r"(((ctx_t *)(ctx))->c) ); asm volatile ( "addc.cc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[1]) ); asm 
volatile ( "addc.cc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[2]) ); asm volatile ( "addc.u32 %0, %0, 0;": "+r"(((uint32_t *)((ctx_t *)(ctx))->t)[3]) ); while (((ctx_t *)(ctx))->c < BUF_SIZE_8) { ((ctx_t *)(ctx))->b[((ctx_t *)(ctx))->c++] = 0; } ((uint64_t *)(aux))[0] = ((ctx_t *)(ctx))->h[0]; ((uint64_t *)(aux))[1] = ((ctx_t *)(ctx))->h[1]; ((uint64_t *)(aux))[2] = ((ctx_t *)(ctx))->h[2]; ((uint64_t *)(aux))[3] = ((ctx_t *)(ctx))->h[3]; ((uint64_t *)(aux))[4] = ((ctx_t *)(ctx))->h[4]; ((uint64_t *)(aux))[5] = ((ctx_t *)(ctx))->h[5]; ((uint64_t *)(aux))[6] = ((ctx_t *)(ctx))->h[6]; ((uint64_t *)(aux))[7] = ((ctx_t *)(ctx))->h[7]; B2B_IV(aux + 8); ((uint64_t *)(aux))[12] ^= ((ctx_t *)(ctx))->t[0]; ((uint64_t *)(aux))[13] ^= ((ctx_t *)(ctx))->t[1]; ((uint64_t *)(aux))[14] = ~((uint64_t *)(aux))[14]; ((uint64_t *)(aux))[16] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 0]; ((uint64_t *)(aux))[17] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 1]; ((uint64_t *)(aux))[18] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 2]; ((uint64_t *)(aux))[19] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 3]; ((uint64_t *)(aux))[20] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 4]; ((uint64_t *)(aux))[21] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 5]; ((uint64_t *)(aux))[22] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 6]; ((uint64_t *)(aux))[23] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 7]; ((uint64_t *)(aux))[24] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 8]; ((uint64_t *)(aux))[25] = ((uint64_t *)(((ctx_t *)(ctx))->b))[ 9]; ((uint64_t *)(aux))[26] = ((uint64_t *)(((ctx_t *)(ctx))->b))[10]; ((uint64_t *)(aux))[27] = ((uint64_t *)(((ctx_t *)(ctx))->b))[11]; ((uint64_t *)(aux))[28] = ((uint64_t *)(((ctx_t *)(ctx))->b))[12]; ((uint64_t *)(aux))[29] = ((uint64_t *)(((ctx_t *)(ctx))->b))[13]; ((uint64_t *)(aux))[30] = ((uint64_t *)(((ctx_t *)(ctx))->b))[14]; ((uint64_t *)(aux))[31] = ((uint64_t *)(((ctx_t *)(ctx))->b))[15]; devB2B_MIX(aux, aux + 16); ((ctx_t *)(ctx))->h[0] ^= ((uint64_t *)(aux))[0] ^ ((uint64_t *)(aux))[ 8]; 
((ctx_t *)(ctx))->h[1] ^= ((uint64_t *)(aux))[1] ^ ((uint64_t *)(aux))[ 9]; ((ctx_t *)(ctx))->h[2] ^= ((uint64_t *)(aux))[2] ^ ((uint64_t *)(aux))[10]; ((ctx_t *)(ctx))->h[3] ^= ((uint64_t *)(aux))[3] ^ ((uint64_t *)(aux))[11]; ((ctx_t *)(ctx))->h[4] ^= ((uint64_t *)(aux))[4] ^ ((uint64_t *)(aux))[12]; ((ctx_t *)(ctx))->h[5] ^= ((uint64_t *)(aux))[5] ^ ((uint64_t *)(aux))[13]; ((ctx_t *)(ctx))->h[6] ^= ((uint64_t *)(aux))[6] ^ ((uint64_t *)(aux))[14]; ((ctx_t *)(ctx))->h[7] ^= ((uint64_t *)(aux))[7] ^ ((uint64_t *)(aux))[15]; return; } //////////////////////////////////////////////////////////////////////////////// // Unfinalized hash of message //////////////////////////////////////////////////////////////////////////////// void InitMining( // context ctx_t * ctx, // message const uint32_t * mes, // message length in bytes const uint32_t meslen ) { //========================================================================// // Initialize context //========================================================================// memset(ctx->b, 0, BUF_SIZE_8); B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; memset(ctx->t, 0, 16); ctx->c = 0; //========================================================================// // Hash message //========================================================================// for (uint_t j = 0; j < meslen; ++j) { //if (ctx->c == BUF_SIZE_8) { HOST_B2B_H(ctx, aux); } ctx->b[ctx->c++] = ((const uint8_t *)mes)[j]; } return; } //////////////////////////////////////////////////////////////////////////////// // Block mining //////////////////////////////////////////////////////////////////////////////// __global__ __launch_bounds__(64, 64) __global__ void BlockMining( // boundary for puzzle const uint32_t * bound, // data: mes ctx const uint32_t * data, // nonce base const uint64_t base, // block height const uint32_t height, // precalculated hashes const uint32_t * hashes, // indices of valid solutions uint32_t * valid , uint32_t * count ) 
{ uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; uint32_t const thread_id = threadIdx.x & 7; uint32_t const thrdblck_id = threadIdx.x; uint32_t const hash_id = threadIdx.x >> 3; uint64_t aux[32] = {0}; uint32_t ind[32] = {0}; uint32_t r[9] = {0}; uint4 v1 = {0,0,0,0}; uint4 v2 = {0,0,0,0}; uint4 v3 = {0,0,0,0}; uint4 v4 = {0,0,0,0}; ctx_t sdata; ctx_t *ctx = ((ctx_t * )(&sdata)); __shared__ uint32_t shared_index[64]; __shared__ uint32_t shared_data[512]; if (tid < NONCES_PER_ITER) { uint32_t j; uint32_t non[2]; asm volatile ( "add.cc.u32 %0, %1, %2;": "=r"(non[0]): "r"(((uint32_t *)&base)[0]), "r"(tid) ); asm volatile ( "addc.u32 %0, %1, 0;": "=r"(non[1]): "r"(((uint32_t *)&base)[1]) ); //================================================================// // Hash nonce //================================================================// for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = (( const uint8_t *)data)[j]; } ctx->b[ctx->c++] = ((uint8_t *)non)[7]; ctx->b[ctx->c++] = ((uint8_t *)non)[6]; ctx->b[ctx->c++] = ((uint8_t *)non)[5]; ctx->b[ctx->c++] = ((uint8_t *)non)[4]; ctx->b[ctx->c++] = ((uint8_t *)non)[3]; ctx->b[ctx->c++] = ((uint8_t *)non)[2]; ctx->b[ctx->c++] = ((uint8_t *)non)[1]; ctx->b[ctx->c++] = ((uint8_t *)non)[0]; //================================================================// // Finalize hashes //================================================================// devDEVICE_B2B_H_LAST(ctx, aux); //---------------------------------------------------------------------------------------------------------------------// //Begin lookup * * * * * * * * * * * * * * * * * * * * * #pragma unroll 32 for (j = 0; j < NUM_SIZE_8; ++j) { ((uint8_t *)r)[j] = (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } uint64_t h2; ((uint8_t*)&h2)[0] 
= ((uint8_t*)r)[31]; ((uint8_t*)&h2)[1] = ((uint8_t*)r)[30]; ((uint8_t*)&h2)[2] = ((uint8_t*)r)[29]; ((uint8_t*)&h2)[3] = ((uint8_t*)r)[28]; ((uint8_t*)&h2)[4] = ((uint8_t*)r)[27]; ((uint8_t*)&h2)[5] = ((uint8_t*)r)[26]; ((uint8_t*)&h2)[6] = ((uint8_t*)r)[25]; ((uint8_t*)&h2)[7] = ((uint8_t*)r)[24]; uint32_t h3 = h2 % N_LEN; #pragma unroll 8 for (int i = 0; i < 8; ++i) { r[7-i] = cuda_swab32(hashes[(h3 << 3) + i]); } /*if (tid == 0) { printf("\n"); for (int j = 0; j < 8; j++) { printf("%08x", r[j]); } printf("\n"); }*/ //====================================================================// // Initialize context //====================================================================// #pragma unroll 8 for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; //====================================================================// // Hash //====================================================================// #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8 - 1; ++j) { ctx->b[ctx->c++] = ((const uint8_t *)r)[j + 1]; } //====================================================================// // Hash message //====================================================================// #pragma unroll 32 for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = (( const uint8_t *)data)[j]; } //================================================================// // Hash nonce //================================================================// ctx->b[ctx->c++] = ((uint8_t *)non)[7]; ctx->b[ctx->c++] = ((uint8_t *)non)[6]; ctx->b[ctx->c++] = ((uint8_t *)non)[5]; ctx->b[ctx->c++] = ((uint8_t *)non)[4]; ctx->b[ctx->c++] = ((uint8_t *)non)[3]; ctx->b[ctx->c++] = ((uint8_t *)non)[2]; ctx->b[ctx->c++] = ((uint8_t *)non)[1]; ctx->b[ctx->c++] = ((uint8_t *)non)[0]; 
//---------------------------------------------------------------------------------------------------------------------// //================================================================// // Finalize hashes //================================================================// devDEVICE_B2B_H_LAST(ctx, aux); #pragma unroll 32 for (j = 0; j < 32; ++j) { ((uint8_t *)r)[(j & 0xFFFFFFFC) + (3 - (j & 3))] = (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } //================================================================// // Generate indices //================================================================// ((uint8_t *)r)[33] = ((uint8_t *)r)[1]; ((uint8_t *)r)[34] = ((uint8_t *)r)[2]; ((uint8_t *)r)[35] = ((uint8_t *)r)[3]; #pragma unroll for (int k = 0; k < K_LEN; k += 4) { ind[k] = r[k >> 2] & N_MASK; ind[k + 1] = ((r[k >> 2] << 8) | (r[(k >> 2) + 1] >> 24)) & N_MASK; ind[k + 2] = ((r[k >> 2] << 16) | (r[(k >> 2) + 1] >> 16)) & N_MASK; ind[k + 3] = ((r[k >> 2] << 24) | (r[(k >> 2) + 1] >> 8)) & N_MASK; } //---------------------------------------------------------------------------------------------------------------------// //================================================================// // Calculate result //================================================================// shared_index[thrdblck_id] = ind[0]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = (hashes[(shared_index[hash_id+48]<<3)+thread_id]); 
shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v1.x = shared_data[(thrdblck_id<<3) + 0]; v1.y = shared_data[(thrdblck_id<<3) + 1]; v1.z = shared_data[(thrdblck_id<<3) + 2]; v1.w = shared_data[(thrdblck_id<<3) + 3]; v3.x = shared_data[(thrdblck_id<<3) + 4]; v3.y = shared_data[(thrdblck_id<<3) + 5]; v3.z = shared_data[(thrdblck_id<<3) + 6]; v3.w = shared_data[(thrdblck_id<<3) + 7]; shared_index[thrdblck_id] = ind[1]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = (hashes[(shared_index[hash_id+48]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v2.x = shared_data[(thrdblck_id<<3) + 0]; v2.y = shared_data[(thrdblck_id<<3) + 1]; v2.z = shared_data[(thrdblck_id<<3) + 2]; v2.w = shared_data[(thrdblck_id<<3) + 3]; v4.x = shared_data[(thrdblck_id<<3) + 4]; v4.y = shared_data[(thrdblck_id<<3) + 5]; v4.z = shared_data[(thrdblck_id<<3) + 6]; v4.w = shared_data[(thrdblck_id<<3) + 7]; asm volatile ("add.cc.u32 %0, %1, %2;":"=r"(r[0]):"r"(v1.x),"r"(v2.x)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[1]):"r"(v1.y),"r"(v2.y)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[2]):"r"(v1.z),"r"(v2.z)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[3]):"r"(v1.w),"r"(v2.w)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[4]):"r"(v3.x),"r"(v4.x)); asm volatile ("addc.cc.u32 %0, %1, 
%2;":"=r"(r[5]):"r"(v3.y),"r"(v4.y)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[6]):"r"(v3.z),"r"(v4.z)); asm volatile ("addc.cc.u32 %0, %1, %2;":"=r"(r[7]):"r"(v3.w),"r"(v4.w)); asm volatile ("addc.u32 %0, 0, 0;": "=r"(r[8])); ////////////////////////////////////////////////////////////////////////////////////////////////////////// // remaining additions #pragma unroll for (int k = 2; k < K_LEN; ++k) { shared_index[thrdblck_id] = ind[k]; __syncthreads(); shared_data[(hash_id<<3)+thread_id] = (hashes[(shared_index[hash_id]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+64] = (hashes[(shared_index[hash_id+8]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+128] = (hashes[(shared_index[hash_id+16]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+192] = (hashes[(shared_index[hash_id+24]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+256] = (hashes[(shared_index[hash_id+32]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+320] = (hashes[(shared_index[hash_id+40]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+384] = (hashes[(shared_index[hash_id+48]<<3)+thread_id]); shared_data[(hash_id<<3)+thread_id+448] = (hashes[(shared_index[hash_id+56]<<3)+thread_id]); __syncthreads(); v1.x = shared_data[(thrdblck_id<<3) + 0]; v1.y = shared_data[(thrdblck_id<<3) + 1]; v1.z = shared_data[(thrdblck_id<<3) + 2]; v1.w = shared_data[(thrdblck_id<<3) + 3]; v2.x = shared_data[(thrdblck_id<<3) + 4]; v2.y = shared_data[(thrdblck_id<<3) + 5]; v2.z = shared_data[(thrdblck_id<<3) + 6]; v2.w = shared_data[(thrdblck_id<<3) + 7]; asm volatile ("add.cc.u32 %0, %0, %1;":"+r"(r[0]): "r"(v1.x)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[1]): "r"(v1.y)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[2]): "r"(v1.z)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[3]): "r"(v1.w)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[4]): "r"(v2.x)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[5]): "r"(v2.y)); asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[6]): "r"(v2.z)); 
asm volatile ("addc.cc.u32 %0, %0, %1;":"+r"(r[7]): "r"(v2.w)); asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[8])); } ////////////////////////////////////////////////////////////////////////////////////////////////////////// //--------------------hash(f)-------------------- //====================================================================// // Initialize context //====================================================================// //memset(ctx->b, 0, BUF_SIZE_8); for (int am = 0; am < BUF_SIZE_8; am++) { ctx->b[am] = 0; } B2B_IV(ctx->h); ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8; //memset(ctx->t, 0, 16); ctx->t[0] = 0; ctx->t[1] = 0; ctx->c = 0; //--------------hash-------------------- for (j = 0; ctx->c < BUF_SIZE_8 && j < NUM_SIZE_8; ++j) { ctx->b[ctx->c++] = ((const uint8_t *)r)[NUM_SIZE_8 - j - 1]; } //====================================================================// // Finalize hash //====================================================================// devDEVICE_B2B_H_LAST(ctx, aux); for (j = 0; j < NUM_SIZE_8; ++j) { ((uint8_t*)r)[NUM_SIZE_8 - j - 1] = (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF; } //================================================================// // Dump result to global memory -- LITTLE ENDIAN //================================================================// j = ((uint64_t *)r)[3] < ((uint64_t *)bound)[3] || ((uint64_t *)r)[3] == ((uint64_t *)bound)[3] && ( ((uint64_t *)r)[2] < ((uint64_t *)bound)[2] || ((uint64_t *)r)[2] == ((uint64_t *)bound)[2] && ( ((uint64_t *)r)[1] < ((uint64_t *)bound)[1] || ((uint64_t *)r)[1] == ((uint64_t *)bound)[1] && ((uint64_t *)r)[0] < ((uint64_t *)bound)[0] ) ); if(j ) { uint32_t id = atomicInc(count, MAX_SOLS); valid[id] = tid+1; } } return; } // mining.cu
291340e7310a2e5909fdbf0560f831da0b375953.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define GAPX (22) #define GAPY (22) #define EXTENT (5) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += 
sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } __syncthreads(); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__+1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5*__temp_a2__ + 12*__temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float 
__temp_a10__ = (__temp_a6__ + 15*__temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12*__temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5*__temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5*__temp_a9__+ 12*__temp_b5__); float __temp_b10__ = (__temp_b6__ + 15*__temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12*__temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5*__temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+1)))] = __temp_b19__; // iter 2 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c6__ = (5*__temp_a17__+ 12*__temp_c5__); float __temp_c10__ = (__temp_c6__ + 15*__temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12*__temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5*__temp_c17__); float __temp_c19__ 
= (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2-(__iter_1__+1)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d6__ = (5*__temp_b17__+ 12*__temp_d5__); float __temp_d10__ = (__temp_d6__ + 15*__temp_c17__); float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12*__temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(0-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5*__temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3-(__iter_1__+1)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5*__temp_a2__ + 12*__temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15*__temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12*__temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = 
(__temp_a14__ + 5*__temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5*__temp_a9__+ 12*__temp_b5__); float __temp_b10__ = (__temp_b6__ + 15*__temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12*__temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5*__temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+1)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1); for(; __iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } else if (threadIdx.y == 1) { int __iter_6__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-1); for(; __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + 
(int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1); for(; __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2); __iter_7__++) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } else if (threadIdx.y == 3) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ; if (__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-1); for(; __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ; __iter_7__++ ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__+2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ // iter 1 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(0-(__iter_1__+1)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_a36__ = (5*__temp_a32__ + 12*__temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_a40__ = (__temp_a36__ + 15*__temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]); float __temp_a44__ = (__temp_a40__ + 12*__temp_a43__); float __temp_a47__ = 
(__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]); float __temp_a48__ = (__temp_a44__ + 5*__temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_a49__; // iter 2 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]); float __temp_b36__ = (5*__temp_a39__+ 12*__temp_b35__); float __temp_b40__ = (__temp_b36__ + 15*__temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]); float __temp_b44__ = (__temp_b40__ + 12*__temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]); float __temp_b48__ = (__temp_b44__ + 5*__temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1-(__iter_1__+2)))] = __temp_b49__; // iter 3 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]); float __temp_c36__ = (5*__temp_a47__+ 12*__temp_c35__); float __temp_c40__ = (__temp_c36__ + 15*__temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]); float __temp_c44__ = (__temp_c40__ + 12*__temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]); float __temp_c48__ = (__temp_c44__ + 5*__temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2-(__iter_1__+2)))] = __temp_c49__; // iter 4 : __iter_10__ + 3 float __temp_d35__ = 
(__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]); float __temp_d36__ = (5*__temp_b47__+ 12*__temp_d35__); float __temp_d40__ = (__temp_d36__ + 15*__temp_c47__); float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]); float __temp_d44__ = (__temp_d40__ + 12*__temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(0-(__iter_1__+1)))]); float __temp_d48__ = (__temp_d44__ + 5*__temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3-(__iter_1__+2)))] = __temp_d49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1); for(; __iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } else if (threadIdx.y == 1) { int __iter_12__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-1); for(; __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; if (__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1); for(; __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2); __iter_13__++) { 
__copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; if (__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-1); for(; __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ; __iter_13__++){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))]; } } } __syncthreads(); int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__+3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ // iter 1 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a62__ = (5*__temp_a60__ + 12*__temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a64__ = (__temp_a62__ + 15*__temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a66__ = (__temp_a64__ + 12*__temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_a68__ = (__temp_a66__ + 5*__temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); 
__tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_a69__; // iter 2 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_b62__ = (5*__temp_a63__+ 12*__temp_b61__); float __temp_b64__ = (__temp_b62__ + 15*__temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_b66__ = (__temp_b64__ + 12*__temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]); float __temp_b68__ = (__temp_b66__ + 5*__temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+3)))] = __temp_b69__; // iter 3 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]); float __temp_c62__ = (5*__temp_a67__+ 12*__temp_c61__); float __temp_c64__ = (__temp_c62__ + 15*__temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]); float __temp_c66__ = (__temp_c64__ + 12*__temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]); float __temp_c68__ = (__temp_c66__ + 5*__temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2-(__iter_1__+3)))] = __temp_c69__; // iter 4 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]); float __temp_d62__ = (5*__temp_b67__+ 12*__temp_d61__); float __temp_d64__ = (__temp_d62__ + 15*__temp_c67__); float 
__temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]); float __temp_d66__ = (__temp_d64__ + 12*__temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(0-(__iter_1__+2)))]); float __temp_d68__ = (__temp_d66__ + 5*__temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3-(__iter_1__+3)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ // iter 1 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a62__ = (5*__temp_a60__ + 12*__temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a64__ = (__temp_a62__ + 15*__temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]); float __temp_a66__ = (__temp_a64__ + 12*__temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_a68__ = (__temp_a66__ + 5*__temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_a69__; // iter 2 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_b62__ = 
(5*__temp_a63__+ 12*__temp_b61__); float __temp_b64__ = (__temp_b62__ + 15*__temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]); float __temp_b66__ = (__temp_b64__ + 12*__temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]); float __temp_b68__ = (__temp_b66__ + 5*__temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+3)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1); for(; __iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } else if (threadIdx.y == 1) { int __iter_18__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-1); for(; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ; if (__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1); for(; __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2); __iter_19__++) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = 
__tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ; if (__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-1); for(; __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ; __iter_19__++){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))]; } } } __syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__+4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_a82__ = (5*__temp_a80__ + 12*__temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_a84__ = (__temp_a82__ + 15*__temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]); float __temp_a86__ = (__temp_a84__ + 12*__temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]); float __temp_a88__ = (__temp_a86__ + 5*__temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = 
(__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]); float __temp_b82__ = (5*__temp_a83__+ 12*__temp_b81__); float __temp_b84__ = (__temp_b82__ + 15*__temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]); float __temp_b86__ = (__temp_b84__ + 12*__temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]); float __temp_b88__ = (__temp_b86__ + 5*__temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]); float __temp_c82__ = (5*__temp_a87__+ 12*__temp_c81__); float __temp_c84__ = (__temp_c82__ + 15*__temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]); float __temp_c86__ = (__temp_c84__ + 12*__temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]); float __temp_c88__ = (__temp_c86__ + 5*__temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]); float __temp_d82__ = (5*__temp_b87__+ 12*__temp_d81__); float __temp_d84__ = (__temp_d82__ + 15*__temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]); float __temp_d86__ = (__temp_d84__ + 12*__temp_d85__); float __temp_d87__ = 
(__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(0-(__iter_1__+3)))]); float __temp_d88__ = (__temp_d86__ + 5*__temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); return SMemSize; } __global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= 
FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } __syncthreads(); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__+1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_a19__; // iter 0 : __iter_4__ + 1 
float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__+ 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+0)))] = __temp_b19__; // iter 0 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__+ 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = 
(__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(0-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3-(__iter_1__+0)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_a19__; // iter 0 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5 * 
__temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1); for(; __iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-1; for(; __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1); for(; __iter_7__ < FORMA_MAX((__iter_0__-1),1); __iter_7__++) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if 
(threadIdx.y == 3) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = FORMA_MIN(((__iter_0__+GAPX+1)),(M-2)); for(; __iter_7__ < FORMA_MIN(((__iter_0__+GAPX+3)),(M-2)); __iter_7__++){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__+2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(0-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_a49__; // iter 2 : __iter_10__ + 1 float __temp_b35__ = 
(__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1-(__iter_1__+0)))] = __temp_b49__; // iter 3 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2-(__iter_1__+0)))] = __temp_c49__; // iter 4 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ 
__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(0-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3-(__iter_1__+0)))] = __temp_d49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1); for(; __iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_12__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-1); for(; __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1); for(; __iter_13__ < FORMA_MAX((__iter_0__-2),1); __iter_13__++) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; 
if(__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = FORMA_MIN((__iter_0__+GAPX+2),(M-2)); for(; __iter_13__ < FORMA_MIN((__iter_0__+GAPX+4),(M-2)) ; __iter_13__++){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__+3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); 
float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ + 12 * __temp_d65__); float __temp_d67__ = 
(__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(0-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__+ 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = 
(__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1); for(; __iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-1; for(; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1); for(; __iter_19__ < FORMA_MAX((__iter_0__-3),1); __iter_19__++) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + 
(int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = FORMA_MIN(((__iter_0__+GAPX+3)),(M-2)); for(; __iter_19__ < FORMA_MIN(((__iter_0__+GAPX+5)),(M-2)) ; __iter_19__++ ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__+4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 
15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(0-(__iter_1__+0)))]); float __temp_d88__ = (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); 
__var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } } __global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__-2,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3)+(EXTENT-__iter_1__))] = 
input[__iter_3__+(M-0)*(__iter_2__+3)]; } } else if( __iter_2__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; } } __syncthreads(); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__-1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = 
(__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; // iter 2 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__ + 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = 
(__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * 
__temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1); for(; __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2); __iter_7__++) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { int __iter_7__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-1); for(; __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ; __iter_7__++){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1); for(; __iter_6__ < FORMA_MAX((__iter_1__-1),1); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else 
if (threadIdx.y == 3) { int __iter_6__ = FORMA_MIN(((__iter_1__+GAPY+1)),(N-2)); for(; __iter_6__ < FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__-2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = 
(__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; // iter 2 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c49__; // iter 3 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ 
__iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d49__; } } else if( __iter_10__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); 
float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1); for(; __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2); __iter_13__++) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { int __iter_13__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-1); for(; __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ; __iter_13__++){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1); for(; __iter_12__ < FORMA_MAX((__iter_1__-2),1); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ 
__tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MIN(((__iter_1__+GAPY+2)),(N-2)); for(; __iter_12__ < FORMA_MIN(((__iter_1__+GAPY+4)),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__-3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ //iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); 
__tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 
12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ + 12 * __temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ //iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = 
(__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1); for(; __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2); __iter_19__++) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_19__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-1); for(; __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ; __iter_19__++ ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1); for(; __iter_18__ < FORMA_MAX((__iter_1__-3),1); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= 
FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)); for(; __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__-4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); 
__var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = 
(__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d88__ = (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } else if( __iter_22__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * 
__temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; } } } __global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__-2,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ 
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } else if( __iter_2__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; } } __syncthreads (); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__-1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = 
(__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; // iter 2 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__ + 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float 
__temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * 
__temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1); for(; __iter_6__ < FORMA_MAX((__iter_1__-1),1); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MIN(((__iter_1__+GAPY+1)),(N-2)); for(; __iter_6__ < FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ 
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1); for(; __iter_7__ < FORMA_MAX((__iter_0__-1),1); __iter_7__++) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 3) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_7__ = FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))+1; for(; __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ; __iter_7__++){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__-2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ 
__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; // iter 2 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = 
(__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c49__; // iter 3 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d49__; } } else if( __iter_10__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = 
(__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1); for(; __iter_12__ < FORMA_MAX((__iter_1__-2),1); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 1) { int __iter_12__ = FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))+1; for(; __iter_12__ <= 
FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1); for(; __iter_13__ < FORMA_MAX((__iter_0__-2),1); __iter_13__++) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2))) { int __iter_13__ = FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))+1; for(; __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ; __iter_13__++){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__-3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = 
(__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = 
(__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ + 12 * __temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float 
__temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1); for(; __iter_18__ < FORMA_MAX((__iter_1__-3),1); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] 
= __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))+1; for(; __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1); for(; __iter_19__ < FORMA_MAX((__iter_0__-3),1); __iter_19__++) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2))) { int __iter_19__ = FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))+1; for(; __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ; __iter_19__++){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__-4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = 
(__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = 
(__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d88__ = (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } else if( __iter_22__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ 
= (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: input\n");
  /* Determine whether the caller handed us a host or a device pointer for
     h_input, and pick the matching memcpy kind.  If h_input is already on the
     device, the staging copy below is skipped entirely. */
  hipPointerAttribute_t ptrAttrib_h_input;
  hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
  if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
    if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
      memcpy_kind_h_input = hipMemcpyDeviceToDevice;
  hipGetLastError(); /* clear any sticky error from the attribute probe */
  if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
    hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
  }
  /* Device output buffer (copied back to __var_0__ at the end). */
  float * __var_1__;
  hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __var_1__\n");
  /* Scratch arrays, each the full N*M grid, passed to every kernel launch.
     NOTE(review): presumably these carry halo/intermediate stage data between
     the four launches — confirm against the kernel bodies. */
  float * __copy_arr_0__;
  hipMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
  float * __copy_arr_1__;
  hipMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
  float * __copy_arr_2__;
  hipMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
  float * __copy_arr_t0__;
  hipMalloc(&__copy_arr_t0__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_t0__\n");
  float * __copy_arr_t1__;
  hipMalloc(&__copy_arr_t1__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_t1__\n");
  float * __copy_arr_t2__;
  hipMalloc(&__copy_arr_t2__,sizeof(float)*((N-0)*(M-0)));
  Check_CUDA_Error("Allocation Error!! : __copy_arr_t2__\n");
  /*Host Allocation End */
  /* Kernel Launch Begin */
  int __FORMA_MAX_SHARED_MEM__;
  hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
  hipEvent_t _forma_timer_start_,_forma_timer_stop_;
  hipEventCreate(&_forma_timer_start_);
  hipEventCreate(&_forma_timer_stop_);
  hipEventRecord(_forma_timer_start_,0);
#endif
  /* Problem extent covered by the kernels: full [0, M-1] x [0, N-1] range. */
  int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
  int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1;
  /* Ask the runtime for the occupancy-optimal flat block size, then split it
     into a roughly square 2-D block: x = sqrt(total) rounded down to a
     multiple of 32 (warp-aligned), y = remainder.  Both dimensions are
     clamped below by 9 (the minimum tile the stencil needs).
     NOTE(review): pow(n, 0.5) is truncated toward zero by the int assignment. */
  int __max_occupancy_blocksize___kernel___forma_kernel__0__;
  int _max_occupancy_gridsize___kernel___forma_kernel__0__;
  hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
  int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
  __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
  int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9);
  __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__;
  int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__;
  int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9);
  __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__;
  dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
  int
__SMemSize___kernel___forma_kernel__0__ = 0;
  __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
  /* Shrink the block until its shared-memory footprint fits the device limit.
     y is halved first (floor of 9), then x (floor of FORMA_MIN(32,9)).
     BUGFIX: the generated loop had no exit when neither dimension could
     shrink further but the footprint still exceeded the limit — it spun
     forever.  A no-progress guard now breaks out in that case; behavior is
     identical whenever the original loop terminated. */
  while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){
    bool __forma_shrunk__ = false;
    if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9) {
      __blockConfig___kernel___forma_kernel__0__.y /= 2;
      __forma_shrunk__ = true;
    }
    __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
    if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__) break;
    if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9)) {
      __blockConfig___kernel___forma_kernel__0__.x /= 2;
      __forma_shrunk__ = true;
    }
    __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
    if( !__forma_shrunk__ ) break; /* cannot reduce further; proceed best-effort */
  }
  /* Each block covers a (blockDim + GAP) output tile, so the grid strides by
     blockDim.x+GAPX / blockDim.y+GAPY. */
  int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
  int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
  dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
  /* The kernels process 4 rows per thread (4*threadIdx.y indexing), so the
     launched block is 4x shorter in y than the logical block config, which is
     still passed in as FORMA_BLOCKDIM_X/Y. */
  dim3 unrollConfig(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
  hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
  Check_CUDA_Error("Kernel Launch Error!!
: __kernel___forma_kernel__0__\n");
  /* Launches 1-3: identical grid/block/shared-memory configuration and
     argument list as launch 0.  NOTE(review): presumably each launch consumes
     the __copy_arr_* data produced by the previous one — confirm against the
     kernel bodies. */
  hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
  Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
  /* NOTE(review): __blockConfig___kernel___forma_kernel__2__ and __3__ below
     are computed but never used (all launches use unrollConfig); dead code
     from the generator, kept verbatim. */
  dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
  hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
  Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
  dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y);
  hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
  Check_CUDA_Error("Kernel Launch Error!!
: __kernel___forma_kernel__3__\n");
  /* Copy the result back to the caller's buffer.  __var_0__ may itself be a
     device pointer; detect that and pick the matching memcpy kind. */
  hipPointerAttribute_t ptrAttrib___var_0__;
  hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
  if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
    if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
      memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
  hipGetLastError(); /* clear any sticky error from the attribute probe */
  /* Blocking copy: also synchronizes with the four async kernel launches. */
  hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
  hipEventRecord(_forma_timer_stop_,0);
  hipEventSynchronize(_forma_timer_stop_);
  float elapsedTime;
  hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
  printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
  hipEventDestroy(_forma_timer_start_);
  hipEventDestroy(_forma_timer_stop_);
#endif
  /*Kernel Launch End */
  /* Host Free Begin */
  hipFree(input);
  hipFree(__var_1__);
  hipFree(__copy_arr_0__);
  hipFree(__copy_arr_1__);
  hipFree(__copy_arr_2__);
  /* BUGFIX: these three scratch buffers were hipMalloc'd above but never
     released, leaking 3*N*M floats of device memory on every call. */
  hipFree(__copy_arr_t0__);
  hipFree(__copy_arr_t1__);
  hipFree(__copy_arr_t2__);
}
/*Host Free End*/
291340e7310a2e5909fdbf0560f831da0b375953.cu
/*
 * FORMA-generated CUDA implementation of a fused 4-step, 5-point (cross)
 * weighted stencil over an N x M float grid.  Thread blocks are laid out
 * with a GAPX/GAPY gap between their tiles; the gap regions are presumably
 * completed by the companion kernels (__kernel___forma_kernel__1__..3__)
 * defined later in this file -- TODO confirm against the full generated file.
 */
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"

/* Generated helper macros (note: max/min are redefined file-wide). */
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )

/* Inter-block tile gap along x/y, and shared-tile halo extent used by the
   edge kernels.  Values chosen by the generator. */
#define GAPX (22)
#define GAPY (22)
#define EXTENT (5)

#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif

/*
 * Device-wide fill kernel used by initialize_array().
 *
 * Fix: the original wrote input[loc] unconditionally, but the launch grid is
 * rounded up with FORMA_CEIL, so up to FORMA_MAX_BLOCKDIM_0-1 trailing
 * threads wrote past the end of the allocation whenever `size` was not a
 * multiple of the block size.  A `size` parameter plus bounds guard was
 * added; the only caller (initialize_array, below) is updated to match.
 */
template<typename T>
__global__ void __kernel_init__(T* input, int size, T value) {
  int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
  if (loc < size)
    input[loc] = value;
}

/* Set all `size` elements of device array d_input to `value`.
   Launch errors surface at the caller's next Check_CUDA_Error/sync. */
template<typename T>
void initialize_array(T* d_input, int size, T value) {
  dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
  dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  __kernel_init__<<<init_grid,init_block>>>(d_input,size,value);
}

/* Defined elsewhere in the generated file. */
void Check_CUDA_Error(const char* message);

/*Texture references */
/*Shared Memory Variable */
/* Single dynamic shared-memory arena, partitioned manually inside each
   kernel via a running byte offset. */
extern __shared__ char __FORMA_SHARED_MEM__[];

/* Device code Begin */
/*
 * Interior pass of the fused 4-step stencil.  Each block computes four
 * successive stencil applications over its tile entirely in shared memory,
 * exporting 2-wide boundary strips of each intermediate stage to the
 * __copy_arr_k__ staging arrays for the later gap-filling kernels.
 * FORMA_BLOCKDIM_X/Y are the logical tile dims (runtime parameters, since
 * the launch uses an unrolled thread configuration).
 * __copy_arr_t0__..t2__ are accepted but never referenced by this kernel.
 */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
  /* Carve two FORMA_BLOCKDIM_Y x FORMA_BLOCKDIM_X float tiles out of the
     shared arena; the stage pointers (__tilevar_2__..5__) ping-pong
     between them. */
  int __FORMA_SHARED_MEM_OFFSET__ = 0;
  float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
  float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
  float * __tilevar_2__ = __tilevar_0__;
  float * __tilevar_3__ =
__tilevar_1__;
  /* Stage buffers alternate between the two shared tiles declared above:
     stage 1 reads tilevar_2 (buf 0) -> writes tilevar_3 (buf 1),
     stage 2 reads tilevar_3 -> writes tilevar_4 (buf 0 again), etc. */
  float * __tilevar_4__ = __tilevar_0__;
  float * __tilevar_5__ = __tilevar_1__;
  /* Tile origin of this block in the global grid; blocks are spaced
     FORMA_BLOCKDIM + GAP apart, leaving inter-tile gaps for later kernels. */
  int __iter_0__;
  __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX);
  int __iter_1__;
  __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY);
  /* Phase 0: cooperative load.  Each thread loads a column strip of 4
     consecutive rows (threadIdx.y strided by 4) of `input` into tilevar_2. */
  int __iter_2__;
  __iter_2__ = FORMA_MAX(__iter_1__,0) + 4*(int)(threadIdx.y) ;
  if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
    int __iter_3__;
    __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
    if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
      __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
      __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)];
      __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)];
      __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)];
    }
  }
  __syncthreads();
  /* Time step 1 of 4: 5-point cross stencil
       (5*north + 12*west + 15*center + 12*east + 5*south) / 118
     applied to 4 rows per thread; the valid region shrinks by 1 on each
     side relative to the loaded tile.  Result goes to tilevar_3. */
  int __iter_4__;
  __iter_4__ = FORMA_MAX((__iter_1__+1),1) + 4*(int)(threadIdx.y) ;
  if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
    int __iter_5__;
    __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
    if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      // iter 0 : __iter_4__
      float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]);
      float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a6__ = (5*__temp_a2__ + 12*__temp_a5__);
      float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a10__ = (__temp_a6__ + 15*__temp_a9__);
      float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a14__ = (__temp_a10__ + 12*__temp_a13__);
      float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_a18__ = (__temp_a14__ + 5*__temp_a17__);
      float __temp_a19__ = (__temp_a18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_a19__;
      // iter 1 : __iter_4__ + 1  (center/south loads of iter 0 are reused)
      float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_b6__ = (5*__temp_a9__+ 12*__temp_b5__);
      float __temp_b10__ = (__temp_b6__ + 15*__temp_a17__);
      float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_b14__ = (__temp_b10__ + 12*__temp_b13__);
      float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]);
      float __temp_b18__ = (__temp_b14__ + 5*__temp_b17__);
      float __temp_b19__ = (__temp_b18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+1)))] = __temp_b19__;
      // iter 2 : __iter_4__ + 2
      float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]);
      float __temp_c6__ = (5*__temp_a17__+ 12*__temp_c5__);
      float __temp_c10__ = (__temp_c6__ + 15*__temp_b17__);
      float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]);
      float __temp_c14__ = (__temp_c10__ + 12*__temp_c13__);
      float __temp_c17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]);
      float __temp_c18__ = (__temp_c14__ + 5*__temp_c17__);
      float __temp_c19__ = (__temp_c18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2-(__iter_1__+1)))] = __temp_c19__;
      // iter 3 : __iter_4__ + 3
      float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]);
      float __temp_d6__ = (5*__temp_b17__+ 12*__temp_d5__);
      float __temp_d10__ = (__temp_d6__ + 15*__temp_c17__);
      float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]);
      float __temp_d14__ = (__temp_d10__ + 12*__temp_d13__);
      float __temp_d17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(0-(__iter_1__+0)))]);
      float __temp_d18__ = (__temp_d14__ + 5*__temp_d17__);
      float __temp_d19__ = (__temp_d18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3-(__iter_1__+1)))] = __temp_d19__;
    }
  /* Remainder branch: fewer than 4 rows of this thread's strip are in
     range; only two rows are computed here.  NOTE(review): a 3-row
     remainder also falls into this branch and its third row is not
     computed -- presumably covered elsewhere by the generator's scheme;
     verify against FORMA's tiling. */
  } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
    int __iter_5__;
    __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
    if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      // iter 0 : __iter_4__
      float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]);
      float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a6__ = (5*__temp_a2__ + 12*__temp_a5__);
      float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a10__ = (__temp_a6__ + 15*__temp_a9__);
      float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]);
      float __temp_a14__ = (__temp_a10__ + 12*__temp_a13__);
      float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_a18__ = (__temp_a14__ + 5*__temp_a17__);
      float __temp_a19__ = (__temp_a18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+1)))] = __temp_a19__;
      // iter 1 : __iter_4__ + 1
      float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_b6__ = (5*__temp_a9__+ 12*__temp_b5__);
      float __temp_b10__ = (__temp_b6__ + 15*__temp_a17__);
      float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]);
      float __temp_b14__ = (__temp_b10__ + 12*__temp_b13__);
      float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]);
      float __temp_b18__ = (__temp_b14__ + 5*__temp_b17__);
      float __temp_b19__ = (__temp_b18__ / 118);
      __tilevar_3__[__iter_5__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+1)))] = __temp_b19__;
    }
  }
  __syncthreads ();
  /* Export 2-wide boundary strips of the stage-1 result to global staging
     array __copy_arr_0__, by threadIdx.y row:
       y==0: top two rows, y==1: bottom two rows,
       y==2: left two columns, y==3: right two columns
     (for y==2/3 each thread handles one row, indexed by threadIdx.x). */
  if (threadIdx.y == 0) {
    int __iter_6__ = FORMA_MAX((__iter_1__+1),1);
    for(; __iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2); __iter_6__++) {
      int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
      if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
        __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
      }
    }
  } else if (threadIdx.y == 1) {
    int __iter_6__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-1);
    for(; __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ; __iter_6__++) {
      int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
      if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
        __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
      }
    }
  } else if (threadIdx.y == 2) {
    int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ;
    if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) {
      int __iter_7__ = FORMA_MAX((__iter_0__+1),1);
      for(; __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2); __iter_7__++) {
        __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
      }
    }
  } else if (threadIdx.y == 3) {
    int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ;
    if (__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) {
      int __iter_7__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-1);
      for(; __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ; __iter_7__++ ){
        __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+1)))];
      }
    }
  }
  __syncthreads();
  /* Time step 2: same stencil, tilevar_3 -> tilevar_4; valid region shrinks
     by one more on each side.  (No 2-row remainder branch at this stage in
     the generated code.) */
  int __iter_10__;
  __iter_10__ = FORMA_MAX((__iter_1__+2),1) + 4*(int)(threadIdx.y) ;
  if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
    int __iter_11__;
    __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
    if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
      // iter 1 : __iter_10__
      float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(-1)+(0-(__iter_1__+1)))]);
      float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]);
      float __temp_a36__ = (5*__temp_a32__ + 12*__temp_a35__);
      float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]);
      float __temp_a40__ = (__temp_a36__ + 15*__temp_a39__);
      float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+1)))]);
      float __temp_a44__ = (__temp_a40__ + 12*__temp_a43__);
      float __temp_a47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]);
      float __temp_a48__ = (__temp_a44__ + 5*__temp_a47__);
      float __temp_a49__ = (__temp_a48__ / 118);
      __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+2)))] = __temp_a49__;
      // iter 2 : __iter_10__ + 1
      float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]);
      float __temp_b36__ = (5*__temp_a39__+ 12*__temp_b35__);
      float __temp_b40__ = (__temp_b36__ + 15*__temp_a47__);
      float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+1)))]);
      float __temp_b44__ = (__temp_b40__ + 12*__temp_b43__);
      float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]);
      float __temp_b48__ = (__temp_b44__ + 5*__temp_b47__);
      float __temp_b49__ = (__temp_b48__ / 118);
      __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1-(__iter_1__+2)))] = __temp_b49__;
      // iter 3 : __iter_10__ + 2
      float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]);
      float __temp_c36__ = (5*__temp_a47__+ 12*__temp_c35__);
      float __temp_c40__ = (__temp_c36__ + 15*__temp_b47__);
      float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+1)))]);
      float __temp_c44__ = (__temp_c40__ + 12*__temp_c43__);
      float __temp_c47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]);
      float __temp_c48__ = (__temp_c44__ + 5*__temp_c47__);
      float __temp_c49__ = (__temp_c48__ / 118);
      __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2-(__iter_1__+2)))] = __temp_c49__;
      // iter 4 : __iter_10__ + 3
      float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]);
      float __temp_d36__ = (5*__temp_b47__+ 12*__temp_d35__);
      float __temp_d40__ = (__temp_d36__ + 15*__temp_c47__);
      float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+1)))]);
      float __temp_d44__ = (__temp_d40__ + 12*__temp_d43__);
      float __temp_d47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+1))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(0-(__iter_1__+1)))]);
      float __temp_d48__ = (__temp_d44__ + 5*__temp_d47__);
      float __temp_d49__ = (__temp_d48__ / 118);
      __tilevar_4__[__iter_11__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3-(__iter_1__+2)))] = __temp_d49__;
    }
  }
  __syncthreads ();
  /* Export stage-2 boundary strips to __copy_arr_1__ (same row/column
     scheme as above, shifted inward by one). */
  if (threadIdx.y == 0) {
    int __iter_12__ = FORMA_MAX((__iter_1__+2),1);
    for(; __iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2); __iter_12__++) {
      int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
      if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
        __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
      }
    }
  } else if (threadIdx.y == 1) {
    int __iter_12__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-1);
    for(; __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ; __iter_12__++) {
      int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
      if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
        __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
      }
    }
  } else if (threadIdx.y == 2) {
    int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ;
    if (__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) {
      int __iter_13__ = FORMA_MAX((__iter_0__+2),1);
      for(; __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2); __iter_13__++) {
        __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
      }
    }
  } else if (threadIdx.y == 3) {
    int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ;
    if (__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) {
      int __iter_13__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-1);
      for(; __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ; __iter_13__++){
        __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+2)))];
      }
    }
  }
  __syncthreads();
  /* Time step 3: tilevar_4 -> tilevar_5 (with a 2-row remainder branch). */
  int __iter_16__;
  __iter_16__ = FORMA_MAX((__iter_1__+3),1) + 4*(int)(threadIdx.y) ;
  if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
    int __iter_17__;
    __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
    if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      // iter 1 : __iter_16__
      float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]);
      float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a62__ = (5*__temp_a60__ + 12*__temp_a61__);
      float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a64__ = (__temp_a62__ + 15*__temp_a63__);
      float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a66__ = (__temp_a64__ + 12*__temp_a65__);
      float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_a68__ = (__temp_a66__ + 5*__temp_a67__);
      float __temp_a69__ = (__temp_a68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_a69__;
      // iter 2 : __iter_16__ + 1
      float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_b62__ = (5*__temp_a63__+ 12*__temp_b61__);
      float __temp_b64__ = (__temp_b62__ + 15*__temp_a67__);
      float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_b66__ = (__temp_b64__ + 12*__temp_b65__);
      float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]);
      float __temp_b68__ = (__temp_b66__ + 5*__temp_b67__);
      float __temp_b69__ = (__temp_b68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+3)))] = __temp_b69__;
      // iter 3 : __iter_16__ + 2
      float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]);
      float __temp_c62__ = (5*__temp_a67__+ 12*__temp_c61__);
      float __temp_c64__ = (__temp_c62__ + 15*__temp_b67__);
      float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]);
      float __temp_c66__ = (__temp_c64__ + 12*__temp_c65__);
      float __temp_c67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]);
      float __temp_c68__ = (__temp_c66__ + 5*__temp_c67__);
      float __temp_c69__ = (__temp_c68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2-(__iter_1__+3)))] = __temp_c69__;
      // iter 4 : __iter_16__ + 3
      float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]);
      float __temp_d62__ = (5*__temp_b67__+ 12*__temp_d61__);
      float __temp_d64__ = (__temp_d62__ + 15*__temp_c67__);
      float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+2)))]);
      float __temp_d66__ = (__temp_d64__ + 12*__temp_d65__);
      float __temp_d67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(0-(__iter_1__+2)))]);
      float __temp_d68__ = (__temp_d66__ + 5*__temp_d67__);
      float __temp_d69__ = (__temp_d68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3-(__iter_1__+3)))] = __temp_d69__;
    }
  } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
    int __iter_17__;
    __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
    if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      // iter 1 : __iter_16__
      float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+2)))]);
      float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a62__ = (5*__temp_a60__ + 12*__temp_a61__);
      float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a64__ = (__temp_a62__ + 15*__temp_a63__);
      float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+2)))]);
      float __temp_a66__ = (__temp_a64__ + 12*__temp_a65__);
      float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_a68__ = (__temp_a66__ + 5*__temp_a67__);
      float __temp_a69__ = (__temp_a68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+3)))] = __temp_a69__;
      // iter 2 : __iter_16__ + 1
      float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_b62__ = (5*__temp_a63__+ 12*__temp_b61__);
      float __temp_b64__ = (__temp_b62__ + 15*__temp_a67__);
      float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+2)))]);
      float __temp_b66__ = (__temp_b64__ + 12*__temp_b65__);
      float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+2)))]);
      float __temp_b68__ = (__temp_b66__ + 5*__temp_b67__);
      float __temp_b69__ = (__temp_b68__ / 118);
      __tilevar_5__[__iter_17__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+3)))] = __temp_b69__;
    }
  }
  __syncthreads ();
  /* Export stage-3 boundary strips to __copy_arr_2__. */
  if (threadIdx.y == 0) {
    int __iter_18__ = FORMA_MAX((__iter_1__+3),1);
    for(; __iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2); __iter_18__++) {
      int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
      if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
        __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
      }
    }
  } else if (threadIdx.y == 1) {
    int __iter_18__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-1);
    for(; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ; __iter_18__++) {
      int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
      if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
        __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
      }
    }
  } else if (threadIdx.y == 2) {
    int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ;
    if (__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) {
      int __iter_19__ = FORMA_MAX((__iter_0__+3),1);
      for(; __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2); __iter_19__++) {
        __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
      }
    }
  } else if (threadIdx.y == 3) {
    int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ;
    if (__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) {
      int __iter_19__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-1);
      for(; __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ; __iter_19__++){
        __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+3)))];
      }
    }
  }
  __syncthreads();
  /* Time step 4 (final): tilevar_5 -> global output __var_1__. */
  int __iter_22__;
  __iter_22__ = FORMA_MAX((__iter_1__+4),1) + 4*(int)(threadIdx.y) ;
  if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
    int __iter_23__;
    __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
    if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
      // iter 0 : __iter_22__
      float __temp_a80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+3)))]);
      float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
      float __temp_a82__ = (5*__temp_a80__ + 12*__temp_a81__);
      float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
      float __temp_a84__ = (__temp_a82__ + 15*__temp_a83__);
      float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+3)))]);
      float __temp_a86__ = (__temp_a84__ + 12*__temp_a85__);
      float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]);
      float __temp_a88__ = (__temp_a86__ + 5*__temp_a87__);
      float __temp_a89__ = (__temp_a88__ / 118);
      __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__;
      // iter 1 : __iter_22__ + 1
      float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]);
      float __temp_b82__ = (5*__temp_a83__+ 12*__temp_b81__);
      float __temp_b84__ = (__temp_b82__ + 15*__temp_a87__);
      float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+3)))]);
      float __temp_b86__ = (__temp_b84__ + 12*__temp_b85__);
      float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]);
      float __temp_b88__ = (__temp_b86__ + 5*__temp_b87__);
      float __temp_b89__ = (__temp_b88__ / 118);
      __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__;
      // iter 2 : __iter_22__ + 2
      float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]);
      float __temp_c82__ = (5*__temp_a87__+ 12*__temp_c81__);
      float __temp_c84__ = (__temp_c82__ + 15*__temp_b87__);
      float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+3)))]);
      float __temp_c86__ = (__temp_c84__ + 12*__temp_c85__);
      float __temp_c87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]);
      float __temp_c88__ = (__temp_c86__ + 5*__temp_c87__);
      float __temp_c89__ = (__temp_c88__ / 118);
      __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__;
      // iter 3 : __iter_22__ + 3
      float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]);
      float __temp_d82__ = (5*__temp_b87__+ 12*__temp_d81__);
      float __temp_d84__ = (__temp_d82__ + 15*__temp_c87__);
      float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+3)))]);
      float __temp_d86__ = (__temp_d84__ + 12*__temp_d85__);
      float __temp_d87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+3))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(0-(__iter_1__+3)))]);
      float __temp_d88__ = (__temp_d86__ + 5*__temp_d87__);
      float __temp_d89__ = (__temp_d88__ / 118);
      __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__;
    }
  }
} /* end __kernel___forma_kernel__0__ */
/* Return type of __blockSizeToSMemSize___kernel___forma_kernel__0__,
   whose definition continues on the following line. */
int
__blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); return SMemSize; } __global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1-__iter_1__))] = 
input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } __syncthreads(); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__+1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_a19__; // iter 0 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__+ 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = 
(__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+0)))] = __temp_b19__; // iter 0 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__+ 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(0-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(0-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float 
__temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3-(__iter_1__+0)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(0-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+0)))] = __temp_a19__; // iter 0 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(0-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = 
(__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(0-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1); for(; __iter_6__ < (FORMA_MAX((__iter_1__+1),1)+2); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))-1; for(; __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1); for(; __iter_7__ < FORMA_MAX((__iter_0__-1),1); __iter_7__++) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 3) { int __iter_6__ = FORMA_MAX((__iter_1__+1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2))) { int __iter_7__ = FORMA_MIN(((__iter_0__+GAPX+1)),(M-2)); for(; __iter_7__ < FORMA_MIN(((__iter_0__+GAPX+3)),(M-2)); __iter_7__++){ 
__tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__+2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(0-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(0-(__iter_1__+0)))] = __temp_a49__; // iter 2 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(0-(__iter_1__+0)))]); float 
__temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1-(__iter_1__+0)))] = __temp_b49__; // iter 3 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(0-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2-(__iter_1__+0)))] = __temp_c49__; // iter 4 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(0-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(0-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); 
__tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3-(__iter_1__+0)))] = __temp_d49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1); for(; __iter_12__ < (FORMA_MAX((__iter_1__+2),1)+2); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_12__ = (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))-1); for(; __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1); for(; __iter_13__ < FORMA_MAX((__iter_0__-2),1); __iter_13__++) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MAX((__iter_1__+2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2))) { int __iter_13__ = FORMA_MIN((__iter_0__+GAPX+2),(M-2)); for(; __iter_13__ < FORMA_MIN((__iter_0__+GAPX+4),(M-2)) ; __iter_13__++){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int 
__iter_16__; __iter_16__ = FORMA_MAX((__iter_1__+3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); 
float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(0-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ + 12 * __temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(0-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){ int __iter_17__; __iter_17__ = 
FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(0-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(0-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__+ 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(0-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(0-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); 
__tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1); for(; __iter_18__ < (FORMA_MAX((__iter_1__+3),1)+2); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))-1; for(; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1); for(; __iter_19__ < FORMA_MAX((__iter_0__-3),1); __iter_19__++) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MAX((__iter_1__+3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2))) { int __iter_19__ = FORMA_MIN(((__iter_0__+GAPX+3)),(M-2)); for(; __iter_19__ < FORMA_MIN(((__iter_0__+GAPX+5)),(M-2)) ; __iter_19__++ ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(0-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int 
__iter_22__; __iter_22__ = FORMA_MAX((__iter_1__+4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(0-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(0-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float 
__temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(0-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(0-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(0-(__iter_1__+0)))]); float __temp_d88__ = (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } } __global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * __restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int 
FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__-2,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } else if( __iter_2__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; 
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; } } __syncthreads(); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__-1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float 
__temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; // iter 2 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__ + 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d19__; } } else 
if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); 
__tilevar_3__[__iter_5__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1); for(; __iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2); __iter_7__++) { __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2))) { int __iter_7__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-1); for(; __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ; __iter_7__++){ __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1); for(; __iter_6__ < FORMA_MAX((__iter_1__-1),1); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 3) { int __iter_6__ = FORMA_MIN(((__iter_1__+GAPY+1)),(N-2)); for(; __iter_6__ < FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ __tilevar_3__[__iter_7__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = 
FORMA_MAX((__iter_1__-2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = 
(__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; // iter 2 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c49__; // iter 3 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d49__; } } else if( __iter_10__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = 
FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ __iter_11__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); 
__tilevar_4__[__iter_11__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1); for(; __iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2); __iter_13__++) { __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))) { int __iter_13__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-1); for(; __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ; __iter_13__++){ __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)] = __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1); for(; __iter_12__ < FORMA_MAX((__iter_1__-2),1); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MIN(((__iter_1__+GAPY+2)),(N-2)); for(; __iter_12__ < FORMA_MIN(((__iter_1__+GAPY+4)),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ __tilevar_4__[__iter_13__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); 
int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__-3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ //iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); 
float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ + 12 * __temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ 
= FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ //iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); 
__tilevar_5__[__iter_17__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1); for(; __iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2); __iter_19__++) { __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_19__ = (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-1); for(; __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ; __iter_19__++ ){ __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)] = __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1); for(; __iter_18__ < FORMA_MAX((__iter_1__-3),1); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)); for(; __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ __tilevar_5__[__iter_19__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } 
__syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__-4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * 
__temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d88__ = (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } else if( __iter_22__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = 
(__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; } } } __global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, float * __restrict__ __copy_arr_t0__, float * 
__restrict__ __copy_arr_t1__, float * __restrict__ __copy_arr_t2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0))); float * __tilevar_2__ = __tilevar_0__; float * __tilevar_3__ = __tilevar_1__; float * __tilevar_4__ = __tilevar_0__; float * __tilevar_5__ = __tilevar_1__; int __iter_0__; __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X) + GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__; __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y) + GAPY) + (int)(FORMA_BLOCKDIM_Y); int __iter_2__; __iter_2__ = FORMA_MAX(__iter_1__-2,0) + 4*(int)(threadIdx.y) ; if( __iter_2__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(2)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+2)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(3)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+3)]; } } else if( __iter_2__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-1)) ){ int __iter_3__; __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ; if( __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1)) ){ 
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)]; __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(1)+(EXTENT-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__+1)]; } } __syncthreads (); int __iter_4__; __iter_4__ = FORMA_MAX((__iter_1__-1),1) + 4*(int)(threadIdx.y) ; if( __iter_4__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = 
(__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * __temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; // iter 2 : __iter_4__ + 2 float __temp_c5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c6__ = (5 * __temp_a17__ + 12 * __temp_c5__); float __temp_c10__ = (__temp_c6__ + 15 * __temp_b17__); float __temp_c13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c14__ = (__temp_c10__ + 12 * __temp_c13__); float __temp_c17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c18__ = (__temp_c14__ + 5 * __temp_c17__); float __temp_c19__ = (__temp_c18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c19__; // iter 3 : __iter_4__ + 3 float __temp_d5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d6__ = (5 * __temp_b17__ + 12 * __temp_d5__); float __temp_d10__ = (__temp_d6__ + 15 * __temp_c17__); float __temp_d13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d14__ = (__temp_d10__ + 12 * __temp_d13__); float __temp_d17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(4)+(EXTENT-(__iter_1__+0)))]); 
float __temp_d18__ = (__temp_d14__ + 5 * __temp_d17__); float __temp_d19__ = (__temp_d18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d19__; } } else if( __iter_4__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(N-2)) ){ int __iter_5__; __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){ // iter 0 : __iter_4__ float __temp_a2__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a6__ = (5 * __temp_a2__ + 12 * __temp_a5__); float __temp_a9__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a10__ = (__temp_a6__ + 15 * __temp_a9__); float __temp_a13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))]); float __temp_a14__ = (__temp_a10__ + 12 * __temp_a13__); float __temp_a17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a18__ = (__temp_a14__ + 5 * __temp_a17__); float __temp_a19__ = (__temp_a18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(EXTENT-(__iter_1__+0)))] = __temp_a19__; // iter 1 : __iter_4__ + 1 float __temp_b5__ = (__tilevar_2__[__iter_5__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b6__ = (5 * __temp_a9__ + 12 * __temp_b5__); float __temp_b10__ = (__temp_b6__ + 15 * __temp_a17__); float __temp_b13__ = (__tilevar_2__[__iter_5__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b14__ = (__temp_b10__ + 12 * 
__temp_b13__); float __temp_b17__ = (__tilevar_2__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b18__ = (__temp_b14__ + 5 * __temp_b17__); float __temp_b19__ = (__temp_b18__ / 118); __tilevar_3__[__iter_5__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b19__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1); for(; __iter_6__ < FORMA_MAX((__iter_1__-1),1); __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 1) { int __iter_6__ = FORMA_MIN(((__iter_1__+GAPY+1)),(N-2)); for(; __iter_6__ < FORMA_MIN(((__iter_1__+GAPY+3)),(N-2)) ; __iter_6__++) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 2) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_7__ = FORMA_MAX((__iter_0__-3),1); for(; __iter_7__ < FORMA_MAX((__iter_0__-1),1); __iter_7__++) { __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } else if (threadIdx.y == 3) { int __iter_6__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.x) ; if(__iter_6__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))) { int __iter_7__ = FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2))+1; for(; __iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ; 
__iter_7__++){ __tilevar_3__[__iter_7__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(EXTENT-(__iter_1__+0)))] = __copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)]; } } } __syncthreads(); int __iter_10__; __iter_10__ = FORMA_MAX((__iter_1__-2),1) + 4*(int)(threadIdx.y) ; if( __iter_10__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = (__tilevar_3__[ 
__iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; // iter 2 : __iter_10__ + 2 float __temp_c35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c36__ = (5 * __temp_a47__ + 12 * __temp_c35__); float __temp_c40__ = (__temp_c36__ + 15 * __temp_b47__); float __temp_c43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c44__ = (__temp_c40__ + 12 * __temp_c43__); float __temp_c47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c48__ = (__temp_c44__ + 5 * __temp_c47__); float __temp_c49__ = (__temp_c48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c49__; // iter 3 : __iter_10__ + 3 float __temp_d35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d36__ = (5 * __temp_b47__ + 12 * __temp_d35__); float __temp_d40__ = (__temp_d36__ + 15 * __temp_c47__); float __temp_d43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d44__ = (__temp_d40__ + 12 * __temp_d43__); float __temp_d47__ = 
(__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d48__ = (__temp_d44__ + 5 * __temp_d47__); float __temp_d49__ = (__temp_d48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d49__; } } else if( __iter_10__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2)) ){ int __iter_11__; __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){ // iter 0 : __iter_10__ float __temp_a32__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*( __iter_10__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a36__ = (5 * __temp_a32__ + 12 * __temp_a35__); float __temp_a39__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a40__ = (__temp_a36__ + 15 * __temp_a39__); float __temp_a43__ = (__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))]); float __temp_a44__ = (__temp_a40__ + 12 * __temp_a43__); float __temp_a47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a48__ = (__temp_a44__ + 5 * __temp_a47__); float __temp_a49__ = (__temp_a48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(EXTENT-(__iter_1__+0)))] = __temp_a49__; // iter 1 : __iter_10__ + 1 float __temp_b35__ = (__tilevar_3__[__iter_11__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b36__ = (5 * __temp_a39__ + 12 * __temp_b35__); float __temp_b40__ = (__temp_b36__ + 15 * __temp_a47__); float __temp_b43__ = 
(__tilevar_3__[ __iter_11__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b44__ = (__temp_b40__ + 12 * __temp_b43__); float __temp_b47__ = (__tilevar_3__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b48__ = (__temp_b44__ + 5 * __temp_b47__); float __temp_b49__ = (__temp_b48__ / 118); __tilevar_4__[__iter_11__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_10__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b49__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1); for(; __iter_12__ < FORMA_MAX((__iter_1__-2),1); __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 1) { int __iter_12__ = FORMA_MIN(((__iter_1__+GAPY+2)-1),(N-2))+1; for(; __iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ; __iter_12__++) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 2) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2))) { int __iter_13__ = FORMA_MAX((__iter_0__-4),1); for(; __iter_13__ < FORMA_MAX((__iter_0__-2),1); __iter_13__++) { __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } else if (threadIdx.y == 3) { int __iter_12__ = FORMA_MAX((__iter_1__-4),1) + 
(int)(threadIdx.x) ; if(__iter_12__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2))) { int __iter_13__ = FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2))+1; for(; __iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ; __iter_13__++){ __tilevar_4__[__iter_13__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(EXTENT-(__iter_1__+0)))] = __copy_arr_1__[__iter_13__+(M-0)*(__iter_12__)]; } } } __syncthreads(); int __iter_16__; __iter_16__ = FORMA_MAX((__iter_1__-3),1) + 4*(int)(threadIdx.y) ; if( __iter_16__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float 
__temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * __temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; // iter 2 : __iter_16__ + 2 float __temp_c61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c62__ = (5 * __temp_a67__ + 12 * __temp_c61__); float __temp_c64__ = (__temp_c62__ + 15 * __temp_b67__); float __temp_c65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c66__ = (__temp_c64__ + 12 * __temp_c65__); float __temp_c67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c68__ = (__temp_c66__ + 5 * __temp_c67__); float __temp_c69__ = (__temp_c68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))] = __temp_c69__; // iter 3 : __iter_16__ + 3 float __temp_d61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d62__ = (5 * __temp_b67__ + 12 * __temp_d61__); float __temp_d64__ = (__temp_d62__ + 15 * __temp_c67__); float __temp_d65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d66__ = (__temp_d64__ 
+ 12 * __temp_d65__); float __temp_d67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d68__ = (__temp_d66__ + 5 * __temp_d67__); float __temp_d69__ = (__temp_d68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(3)+(EXTENT-(__iter_1__+0)))] = __temp_d69__; } } else if( __iter_16__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2)) ){ int __iter_17__; __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ; if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){ // iter 0 : __iter_16__ float __temp_a60__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a62__ = (5 * __temp_a60__ + 12 * __temp_a61__); float __temp_a63__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a64__ = (__temp_a62__ + 15 * __temp_a63__); float __temp_a65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))]); float __temp_a66__ = (__temp_a64__ + 12 * __temp_a65__); float __temp_a67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a68__ = (__temp_a66__ + 5 * __temp_a67__); float __temp_a69__ = (__temp_a68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(EXTENT-(__iter_1__+0)))] = __temp_a69__; // iter 1 : __iter_16__ + 1 float __temp_b61__ = (__tilevar_4__[__iter_17__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b62__ = (5 * __temp_a63__ + 12 * __temp_b61__); float __temp_b64__ = (__temp_b62__ + 15 * 
__temp_a67__); float __temp_b65__ = (__tilevar_4__[__iter_17__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b66__ = (__temp_b64__ + 12 * __temp_b65__); float __temp_b67__ = (__tilevar_4__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b68__ = (__temp_b66__ + 5 * __temp_b67__); float __temp_b69__ = (__temp_b68__ / 118); __tilevar_5__[__iter_17__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_16__+(1)+(EXTENT-(__iter_1__+0)))] = __temp_b69__; } } __syncthreads (); if (threadIdx.y == 0) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1); for(; __iter_18__ < FORMA_MAX((__iter_1__-3),1); __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 1) { int __iter_18__ = FORMA_MIN(((__iter_1__+GAPY+3)-1),(N-2))+1; for(; __iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2)) ; __iter_18__++) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ; if( __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 2) { int __iter_18__ = FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2))) { int __iter_19__ = FORMA_MAX((__iter_0__-5),1); for(; __iter_19__ < FORMA_MAX((__iter_0__-3),1); __iter_19__++) { __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } else if (threadIdx.y == 3) { int __iter_18__ = 
FORMA_MAX((__iter_1__-5),1) + (int)(threadIdx.x) ; if(__iter_18__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(N-2))) { int __iter_19__ = FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2))+1; for(; __iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) ; __iter_19__++){ __tilevar_5__[__iter_19__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_18__+(EXTENT-(__iter_1__+0)))] = __copy_arr_2__[__iter_19__+(M-0)*(__iter_18__)]; } } } __syncthreads(); int __iter_22__; __iter_22__ = FORMA_MAX((__iter_1__-4),1) + 4*(int)(threadIdx.y) ; if( __iter_22__ + 3 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * 
__temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; // iter 2 : __iter_22__ + 2 float __temp_c81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c82__ = (5 * __temp_a87__ + 12 * __temp_c81__); float __temp_c84__ = (__temp_c82__ + 15 * __temp_b87__); float __temp_c85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_c86__ = (__temp_c84__ + 12 * __temp_c85__); float __temp_c87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_c88__ = (__temp_c86__ + 5 * __temp_c87__); float __temp_c89__ = (__temp_c88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+2)] = __temp_c89__; // iter 3 : __iter_22__ + 3 float __temp_d81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d82__ = (5 * __temp_b87__ + 12 * __temp_d81__); float __temp_d84__ = (__temp_d82__ + 15 * __temp_c87__); float __temp_d85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(3)+(EXTENT-(__iter_1__+0)))]); float __temp_d86__ = (__temp_d84__ + 12 * __temp_d85__); float __temp_d87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(4)+(EXTENT-(__iter_1__+0)))]); float __temp_d88__ 
= (__temp_d86__ + 5 * __temp_d87__); float __temp_d89__ = (__temp_d88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+3)] = __temp_d89__; } } else if( __iter_22__ + 1 <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(N-2)) ){ int __iter_23__; __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ; if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){ // iter 0 : __iter_22__ float __temp_a80__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(-1)+(EXTENT-(__iter_1__+0)))]); float __temp_a81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a82__ = (5 * __temp_a80__ + 12 * __temp_a81__); float __temp_a83__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a84__ = (__temp_a82__ + 15 * __temp_a83__); float __temp_a85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(EXTENT-(__iter_1__+0)))]); float __temp_a86__ = (__temp_a84__ + 12 * __temp_a85__); float __temp_a87__ = (__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_a88__ = (__temp_a86__ + 5 * __temp_a87__); float __temp_a89__ = (__temp_a88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__)] = __temp_a89__; // iter 1 : __iter_22__ + 1 float __temp_b81__ = (__tilevar_5__[__iter_23__+(-1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b82__ = (5 * __temp_a83__ + 12 * __temp_b81__); float __temp_b84__ = (__temp_b82__ + 15 * __temp_a87__); float __temp_b85__ = (__tilevar_5__[__iter_23__+(1)+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(1)+(EXTENT-(__iter_1__+0)))]); float __temp_b86__ = (__temp_b84__ + 12 * __temp_b85__); float __temp_b87__ = 
(__tilevar_5__[__iter_23__+(EXTENT-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(2)+(EXTENT-(__iter_1__+0)))]); float __temp_b88__ = (__temp_b86__ + 5 * __temp_b87__); float __temp_b89__ = (__temp_b88__ / 118); __var_1__[__iter_23__+(M-0)*(__iter_22__+1)] = __temp_b89__; } } } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __copy_arr_0__; cudaMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n"); float * __copy_arr_1__; cudaMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n"); float * __copy_arr_2__; cudaMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n"); float * __copy_arr_t0__; cudaMalloc(&__copy_arr_t0__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_t0__\n"); float * __copy_arr_t1__; cudaMalloc(&__copy_arr_t1__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! : __copy_arr_t1__\n"); float * __copy_arr_t2__; cudaMalloc(&__copy_arr_t2__,sizeof(float)*((N-0)*(M-0))); Check_CUDA_Error("Allocation Error!! 
: __copy_arr_t2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1; int __max_occupancy_blocksize___kernel___forma_kernel__0__; int _max_occupancy_gridsize___kernel___forma_kernel__0__; cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0); int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2)); __max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32; int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__; int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__; int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),9); __max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int 
__SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){ if( __blockConfig___kernel___forma_kernel__0__.y/2 > 9) __blockConfig___kernel___forma_kernel__0__.y /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__) break; if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,9)) __blockConfig___kernel___forma_kernel__0__.x /= 2; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); } int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); dim3 unrollConfig(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n"); dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); __kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n"); dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y); __kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __copy_arr_t0__, __copy_arr_t1__, __copy_arr_t2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__3__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__copy_arr_0__); cudaFree(__copy_arr_1__); cudaFree(__copy_arr_2__); } /*Host Free End*/
2af25948f2d361a9e50757d4dcd0876989e37f77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <hip/device_functions.h> #include "device_launch_parameters.h" #include <cstdlib> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hiprand/hiprand.h> #include <math.h> __global__ void Pi_GPU(float *x, float *y, int *totalCounts, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // int threadCount = gridDim.x * blockDim.x; //c int countPoints = 0; for (int i = idx; i < N; i += threadCount) { if (x[i] * x[i] + y[i] * y[i] < 1) { countPoints++; } } atomicAdd(totalCounts, countPoints); // } float PI_CPU(float *x, float *y, int N) { int countPoints = 0; //- for (int i = 0; i < N; i++) { if (x[i] * x[i] + y[i] * y[i] < 1) { countPoints++; } } return float(countPoints) * 4 / N; } int main(){ // const long long N = 20000000; // CPU float *X, *Y, *devX, *devY; X = (float *)calloc(N, sizeof(float)); Y = (float *)calloc(N, sizeof(float)); // GPU hipMalloc((void **)&devX, N * sizeof(float)); hipMalloc((void **)&devY, N * sizeof(float)); // hiprandGenerator_t curandGenerator; hiprandCreateGenerator(&curandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(curandGenerator, 1234ULL); // hiprandGenerateUniform(curandGenerator, devX, N); hiprandGenerateUniform(curandGenerator, devY, N); hiprandDestroyGenerator(curandGenerator); // GPU CPU hipMemcpy(X, devX, N * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(Y, devY, N * sizeof(float), hipMemcpyDeviceToHost); int blockDim = 512; dim3 threads(blockDim, 1); dim3 grid(N / (128 * blockDim), 1); int *gpu_total_counts = 0; int*gpu_total_counts_host = (int *)calloc(1, sizeof(int)); hipMalloc((void **)&gpu_total_counts, 512 * sizeof(int)); // event' GPU float gpuTime = 0; hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); // GPU hipEventRecord(start, 0); Pi_GPU << <grid, threads 
>> >(devX, devY, gpu_total_counts, N); // GPU CPU hipMemcpy(gpu_total_counts_host, gpu_total_counts, sizeof(int), hipMemcpyDeviceToHost); // GPU float gpu_result = (float) *gpu_total_counts_host * 4 / N; // hipEventRecord(stop, 0); // hipEventSynchronize(stop); // GPU hipEventElapsedTime(&gpuTime, start, stop); std::cout << "GPU time " << gpuTime << " Result: " << gpu_result << std::endl; // GPU hipEventDestroy(start); hipEventDestroy(stop); hipFree(devX); hipFree(devY); hipFree(gpu_total_counts); clock_t start_time = clock(); float cpu_result = PI_CPU(X, Y, N); clock_t end_time = clock(); std::cout << "CPU time " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " Result : " << cpu_result << std::endl; // CPU delete X; delete Y; return 0; }
2af25948f2d361a9e50757d4dcd0876989e37f77.cu
#include "cuda_runtime.h" #include <curand_kernel.h> #include <device_functions.h> #include "device_launch_parameters.h" #include <cstdlib> #include <cuda.h> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <curand.h> #include <math.h> __global__ void Pi_GPU(float *x, float *y, int *totalCounts, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // номер элемента int threadCount = gridDim.x * blockDim.x; //cмещение int countPoints = 0; for (int i = idx; i < N; i += threadCount) { if (x[i] * x[i] + y[i] * y[i] < 1) { countPoints++; } } atomicAdd(totalCounts, countPoints); // каждый поток суммирует в переменную } float PI_CPU(float *x, float *y, int N) { int countPoints = 0; //Кол-во точек в круге for (int i = 0; i < N; i++) { if (x[i] * x[i] + y[i] * y[i] < 1) { countPoints++; } } return float(countPoints) * 4 / N; } int main(){ // Количество точек const long long N = 20000000; // Выделяем память для храния данных на CPU float *X, *Y, *devX, *devY; X = (float *)calloc(N, sizeof(float)); Y = (float *)calloc(N, sizeof(float)); //Выделяем память для храния данных на GPU cudaMalloc((void **)&devX, N * sizeof(float)); cudaMalloc((void **)&devY, N * sizeof(float)); //создаем новый генератор curandGenerator_t curandGenerator; curandCreateGenerator(&curandGenerator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(curandGenerator, 1234ULL); // генерируем числа curandGenerateUniform(curandGenerator, devX, N); curandGenerateUniform(curandGenerator, devY, N); curandDestroyGenerator(curandGenerator); //Копируем заполненные вектора с GPU на CPU cudaMemcpy(X, devX, N * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Y, devY, N * sizeof(float), cudaMemcpyDeviceToHost); int blockDim = 512; dim3 threads(blockDim, 1); dim3 grid(N / (128 * blockDim), 1); int *gpu_total_counts = 0; int*gpu_total_counts_host = (int *)calloc(1, sizeof(int)); cudaMalloc((void **)&gpu_total_counts, 512 * sizeof(int)); //Создаем event'ы для замера 
времени работы GPU float gpuTime = 0; cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); //старт расчетов на GPU cudaEventRecord(start, 0); Pi_GPU << <grid, threads >> >(devX, devY, gpu_total_counts, N); //Копируем результат с GPU на CPU cudaMemcpy(gpu_total_counts_host, gpu_total_counts, sizeof(int), cudaMemcpyDeviceToHost); //число пи на GPU float gpu_result = (float) *gpu_total_counts_host * 4 / N; //Отмечаем окончание расчета cudaEventRecord(stop, 0); //Синхронизируемя с моментом окончания расчетов cudaEventSynchronize(stop); //Рассчитываем время работы GPU cudaEventElapsedTime(&gpuTime, start, stop); std::cout << "GPU time " << gpuTime << " Result: " << gpu_result << std::endl; //Чистим ресурсы на GPU cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(devX); cudaFree(devY); cudaFree(gpu_total_counts); clock_t start_time = clock(); float cpu_result = PI_CPU(X, Y, N); clock_t end_time = clock(); std::cout << "CPU time " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " Result : " << cpu_result << std::endl; //Чистим память на CPU delete X; delete Y; return 0; }
893dd5febf7323b243286fbbbd22c13b2c409d9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include <optix.h> #include "system_data.h" #include "per_ray_data.h" #include "vertex_attributes.h" #include "function_indices.h" #include "material_definition.h" #include "light_definition.h" #include "shader_common.h" #include "transform.h" #include "random_number_generators.h" extern "C" __constant__ SystemData sysData; extern "C" __global__ void __closesthit__edf_diffuse() { GeometryInstanceData theData = sysData.geometryInstanceData[optixGetInstanceId()]; // Cast the hipDeviceptr_t to the actual format for Triangles geometry. const unsigned int thePrimitiveIndex = optixGetPrimitiveIndex(); const uint3* indices = reinterpret_cast<uint3*>(theData.indices); const uint3 tri = indices[thePrimitiveIndex]; const TriangleAttributes* attributes = reinterpret_cast<TriangleAttributes*>(theData.attributes); const TriangleAttributes& attr0 = attributes[tri.x]; const TriangleAttributes& attr1 = attributes[tri.y]; const TriangleAttributes& attr2 = attributes[tri.z]; const float2 theBarycentrics = optixGetTriangleBarycentrics(); // beta and gamma const float alpha = 1.0f - theBarycentrics.x - theBarycentrics.y; // PERF This State lies in memory. It's more efficient to hold the data in registers. // Problem is that more advanced material systems need the State all the time. State state; // All in world space coordinates! state.normalGeo = cross(attr1.vertex - attr0.vertex, attr2.vertex - attr0.vertex); //state.tangent = attr0.tangent * alpha + attr1.tangent * theBarycentrics.x + attr2.tangent * theBarycentrics.y; // PERF tangent is not used in this shader. state.normal = attr0.normal * alpha + attr1.normal * theBarycentrics.x + attr2.normal * theBarycentrics.y; state.texcoord = attr0.texcoord * alpha + attr1.texcoord * theBarycentrics.x + attr2.texcoord * theBarycentrics.y; float4 objectToWorld[3]; float4 worldToObject[3]; getTransforms(optixGetTransformListHandle(0), objectToWorld, worldToObject); // Single instance level transformation list only. 
state.normalGeo = normalize(transformNormal(worldToObject, state.normalGeo)); //state.tangent = normalize(transformVector(objectToWorld, state.tangent)); state.normal = normalize(transformNormal(worldToObject, state.normal)); // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); thePrd->distance = optixGetRayTmax(); // Return the current path segment distance, needed for absorption calculations in the integrator. //thePrd->pos = optixGetWorldRayOrigin() + optixGetWorldRayDirection() * optixGetRayTmax(); thePrd->pos += thePrd->wi * thePrd->distance; // DEBUG Check which version is more efficient. // Explicitly include edge-on cases as frontface condition! // Keeps the material stack from overflowing at silhouettes. // Prevents that silhouettes of thin-walled materials use the backface material. // Using the true geometry normal attribute as originally defined on the frontface! thePrd->flags |= (0.0f <= dot(thePrd->wo, state.normalGeo)) ? FLAG_FRONTFACE : 0; if ((thePrd->flags & FLAG_FRONTFACE) == 0) // Looking at the backface? { // Means geometric normal and shading normal are always defined on the side currently looked at. // This gives the backfaces of opaque BSDFs a defined result. state.normalGeo = -state.normalGeo; //state.tangent = -state.tangent; state.normal = -state.normal; // Explicitly DO NOT recalculate the frontface condition! } thePrd->radiance = make_float3(0.0f); // When hitting a geometric light, evaluate the emission first, because this needs the previous diffuse hit's pdf. const int idLight = theData.idLight; if (0 <= idLight && (thePrd->flags & FLAG_FRONTFACE)) // This material is emissive and we're looking at the front face. 
{ const float cosTheta = dot(thePrd->wo, state.normalGeo); if (DENOMINATOR_EPSILON < cosTheta) { const LightDefinition& light = sysData.lightDefinitions[idLight]; float3 emission = make_float3(1.0f); // Neutral factor. if (light.textureEmission) { emission = make_float3(tex2D<float4>(light.textureEmission, state.texcoord.x, state.texcoord.y)); } if (sysData.directLighting && (thePrd->flags & FLAG_DIFFUSE)) { float pdfLight = (thePrd->distance * thePrd->distance) / (light.area * cosTheta); if (light.typeLight == TYPE_LIGHT_RECT && light.textureEmission) { pdfLight *= intensity(emission) / light.integral; // This must be the emission from the texture only! } // If it's an implicit light hit from a diffuse scattering event and // the light emission was not returning a zero pdf (e.g. backface or edge on). // FIXME PERF The light emission pdf cannot be zero here because we're hitting the front face. // (Wouldn't matter for the balance heuristic anway, if b == 0.0 the result is always 1.0.) if (DENOMINATOR_EPSILON < pdfLight) { // Scale the emission with the heuristic between the previous diffuse BSDF sample pdf and this implicit light sample pdf. emission *= balanceHeuristic(thePrd->pdf, pdfLight); } } thePrd->radiance = emission * light.emission; } } // Start fresh with the next BSDF sample. (Either of these values remaining zero is an end-of-path condition.) // The pdf of the previous event was needed for the emission calculation above. thePrd->f_over_pdf = make_float3(0.0f); thePrd->pdf = 0.0f; thePrd->flags |= FLAG_TERMINATE; }
893dd5febf7323b243286fbbbd22c13b2c409d9c.cu
/* * Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include <optix.h> #include "system_data.h" #include "per_ray_data.h" #include "vertex_attributes.h" #include "function_indices.h" #include "material_definition.h" #include "light_definition.h" #include "shader_common.h" #include "transform.h" #include "random_number_generators.h" extern "C" __constant__ SystemData sysData; extern "C" __global__ void __closesthit__edf_diffuse() { GeometryInstanceData theData = sysData.geometryInstanceData[optixGetInstanceId()]; // Cast the CUdeviceptr to the actual format for Triangles geometry. const unsigned int thePrimitiveIndex = optixGetPrimitiveIndex(); const uint3* indices = reinterpret_cast<uint3*>(theData.indices); const uint3 tri = indices[thePrimitiveIndex]; const TriangleAttributes* attributes = reinterpret_cast<TriangleAttributes*>(theData.attributes); const TriangleAttributes& attr0 = attributes[tri.x]; const TriangleAttributes& attr1 = attributes[tri.y]; const TriangleAttributes& attr2 = attributes[tri.z]; const float2 theBarycentrics = optixGetTriangleBarycentrics(); // beta and gamma const float alpha = 1.0f - theBarycentrics.x - theBarycentrics.y; // PERF This State lies in memory. It's more efficient to hold the data in registers. // Problem is that more advanced material systems need the State all the time. State state; // All in world space coordinates! state.normalGeo = cross(attr1.vertex - attr0.vertex, attr2.vertex - attr0.vertex); //state.tangent = attr0.tangent * alpha + attr1.tangent * theBarycentrics.x + attr2.tangent * theBarycentrics.y; // PERF tangent is not used in this shader. state.normal = attr0.normal * alpha + attr1.normal * theBarycentrics.x + attr2.normal * theBarycentrics.y; state.texcoord = attr0.texcoord * alpha + attr1.texcoord * theBarycentrics.x + attr2.texcoord * theBarycentrics.y; float4 objectToWorld[3]; float4 worldToObject[3]; getTransforms(optixGetTransformListHandle(0), objectToWorld, worldToObject); // Single instance level transformation list only. 
state.normalGeo = normalize(transformNormal(worldToObject, state.normalGeo)); //state.tangent = normalize(transformVector(objectToWorld, state.tangent)); state.normal = normalize(transformNormal(worldToObject, state.normal)); // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); thePrd->distance = optixGetRayTmax(); // Return the current path segment distance, needed for absorption calculations in the integrator. //thePrd->pos = optixGetWorldRayOrigin() + optixGetWorldRayDirection() * optixGetRayTmax(); thePrd->pos += thePrd->wi * thePrd->distance; // DEBUG Check which version is more efficient. // Explicitly include edge-on cases as frontface condition! // Keeps the material stack from overflowing at silhouettes. // Prevents that silhouettes of thin-walled materials use the backface material. // Using the true geometry normal attribute as originally defined on the frontface! thePrd->flags |= (0.0f <= dot(thePrd->wo, state.normalGeo)) ? FLAG_FRONTFACE : 0; if ((thePrd->flags & FLAG_FRONTFACE) == 0) // Looking at the backface? { // Means geometric normal and shading normal are always defined on the side currently looked at. // This gives the backfaces of opaque BSDFs a defined result. state.normalGeo = -state.normalGeo; //state.tangent = -state.tangent; state.normal = -state.normal; // Explicitly DO NOT recalculate the frontface condition! } thePrd->radiance = make_float3(0.0f); // When hitting a geometric light, evaluate the emission first, because this needs the previous diffuse hit's pdf. const int idLight = theData.idLight; if (0 <= idLight && (thePrd->flags & FLAG_FRONTFACE)) // This material is emissive and we're looking at the front face. 
{ const float cosTheta = dot(thePrd->wo, state.normalGeo); if (DENOMINATOR_EPSILON < cosTheta) { const LightDefinition& light = sysData.lightDefinitions[idLight]; float3 emission = make_float3(1.0f); // Neutral factor. if (light.textureEmission) { emission = make_float3(tex2D<float4>(light.textureEmission, state.texcoord.x, state.texcoord.y)); } if (sysData.directLighting && (thePrd->flags & FLAG_DIFFUSE)) { float pdfLight = (thePrd->distance * thePrd->distance) / (light.area * cosTheta); if (light.typeLight == TYPE_LIGHT_RECT && light.textureEmission) { pdfLight *= intensity(emission) / light.integral; // This must be the emission from the texture only! } // If it's an implicit light hit from a diffuse scattering event and // the light emission was not returning a zero pdf (e.g. backface or edge on). // FIXME PERF The light emission pdf cannot be zero here because we're hitting the front face. // (Wouldn't matter for the balance heuristic anway, if b == 0.0 the result is always 1.0.) if (DENOMINATOR_EPSILON < pdfLight) { // Scale the emission with the heuristic between the previous diffuse BSDF sample pdf and this implicit light sample pdf. emission *= balanceHeuristic(thePrd->pdf, pdfLight); } } thePrd->radiance = emission * light.emission; } } // Start fresh with the next BSDF sample. (Either of these values remaining zero is an end-of-path condition.) // The pdf of the previous event was needed for the emission calculation above. thePrd->f_over_pdf = make_float3(0.0f); thePrd->pdf = 0.0f; thePrd->flags |= FLAG_TERMINATE; }
cc094c4524893134dabeb8d530b52fd2f4ad1080.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zjacobisetup.cu normal z -> d, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "magmasparse.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void dvjacobisetup_gpu( int num_rows, int num_vecs, double *b, double *d, double *c, double *x){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ){ c[row+i*num_rows] = b[row+i*num_rows] / d[row]; x[row+i*num_rows] = c[row+i*num_rows]; } } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_vector RHS b @param[in] d magma_d_vector vector with diagonal entries @param[out] c magma_d_vector* c = D^(-1) * b @param[out] x magma_d_vector* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_djacobisetup_vector_gpu( int num_rows, magma_d_vector b, magma_d_vector d, magma_d_vector c, magma_d_vector *x, magma_queue_t queue ) { dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); int num_vecs = b.num_rows / num_rows; magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dvjacobisetup_gpu), dim3(grid), dim3(threads), 0 , 0, num_rows, num_vecs, b.dval, d.dval, c.dval, x->val ); return MAGMA_SUCCESS; } __global__ void djacobidiagscal_kernel( int num_rows, int num_vecs, double *b, double *d, double *c){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) c[row+i*num_rows] = b[row+i*num_rows] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_vector RHS b @param[in] d magma_d_vector vector with diagonal entries @param[out] c magma_d_vector* c = D^(-1) * b @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobi_diagscal( int num_rows, magma_d_vector d, magma_d_vector b, magma_d_vector *c, magma_queue_t queue ) { dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); int num_vecs = b.num_rows/num_rows; magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( djacobidiagscal_kernel), dim3(grid), dim3(threads), 0 , 0, num_rows, num_vecs, b.dval, d.dval, c->val ); return MAGMA_SUCCESS; }
cc094c4524893134dabeb8d530b52fd2f4ad1080.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zjacobisetup.cu normal z -> d, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "magmasparse.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void dvjacobisetup_gpu( int num_rows, int num_vecs, double *b, double *d, double *c, double *x){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ){ c[row+i*num_rows] = b[row+i*num_rows] / d[row]; x[row+i*num_rows] = c[row+i*num_rows]; } } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_vector RHS b @param[in] d magma_d_vector vector with diagonal entries @param[out] c magma_d_vector* c = D^(-1) * b @param[out] x magma_d_vector* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_djacobisetup_vector_gpu( int num_rows, magma_d_vector b, magma_d_vector d, magma_d_vector c, magma_d_vector *x, magma_queue_t queue ) { dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); int num_vecs = b.num_rows / num_rows; magma_int_t threads = BLOCK_SIZE; dvjacobisetup_gpu<<< grid, threads, 0 >>> ( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val ); return MAGMA_SUCCESS; } __global__ void djacobidiagscal_kernel( int num_rows, int num_vecs, double *b, double *d, double *c){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) c[row+i*num_rows] = b[row+i*num_rows] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_vector RHS b @param[in] d magma_d_vector vector with diagonal entries @param[out] c magma_d_vector* c = D^(-1) * b @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobi_diagscal( int num_rows, magma_d_vector d, magma_d_vector b, magma_d_vector *c, magma_queue_t queue ) { dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); int num_vecs = b.num_rows/num_rows; magma_int_t threads = BLOCK_SIZE; djacobidiagscal_kernel<<< grid, threads, 0 >>>( num_rows, num_vecs, b.dval, d.dval, c->val ); return MAGMA_SUCCESS; }
a6f670d28ce8f1ad693049a0ab3e407678db70a6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <time.h> #include <sys/time.h> #include <vector> #include <string> #include <iostream> #include <gtest/gtest.h> #include <utilities/error_utils.hpp> #include <cuspatial/soa_readers.hpp> #include <cuspatial/hausdorff.hpp> #include <utility/utility.hpp> #include "hausdorff_util.h" #include <tests/utilities/cudf_test_utils.cuh> #include <tests/utilities/cudf_test_fixtures.h> struct HausdorffCompare : public GdfTest { gdf_column pnt_x,pnt_y,cnt; size_t free_mem = 0, total_mem = 0; void set_initialize(const char *point_fn, const char *cnt_fn) { hipMemGetInfo(&free_mem, &total_mem); std::cout<<"GPU total_mem="<<total_mem<<std::endl; std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl; struct timeval t0,t1; gettimeofday(&t0, nullptr); auto points=cuspatial::read_xy_points_soa(point_fn); pnt_x=points.first; pnt_y=points.second; cnt=cuspatial::read_uint32_soa(cnt_fn); gettimeofday(&t1, nullptr); float data_load_time=cuspatial::calc_time("point/cnt data loading time=", t0,t1); CUDF_EXPECTS(pnt_x.size>0 && pnt_y.size>0 && cnt.size>=0,"invalid # of points/trajectories"); CUDF_EXPECTS(pnt_x.size==pnt_y.size, "x and y columns must have the same size"); CUDF_EXPECTS(pnt_y.size >=cnt.size ,"a point set must have at least one point"); } }; #if 0 // disable until data files are available TEST_F(HausdorffCompare, hausdorfftest) { 
//currently using hard coded paths; to be updated std::string point_fn =std::string("/home/jianting/trajcode/locust256.coor"); std::string cnt_fn =std::string("/home/jianting/trajcode/locust256.objcnt"); //initializaiton this->set_initialize(point_fn.c_str(),cnt_fn.c_str()); //run cuspatial::directed_hausdorff_distance twice struct timeval t0,t1; gettimeofday(&t0, nullptr); gdf_column dist=cuspatial::directed_hausdorff_distance(this->pnt_x,this->pnt_y, this->cnt); gettimeofday(&t1, nullptr); float gpu_hausdorff_time=cuspatial::calc_time("GPU Hausdorff Distance time......",t0,t1); int set_size=this->cnt.size; int num_pair=dist.size; assert(num_pair==set_size*set_size); std::cout<<"num_pair="<<num_pair<<std::endl; //transfer data to CPU and run on CPU int num_pnt=this->pnt_x.size; double *x_c=new double[num_pnt]; double *y_c=new double[num_pnt]; uint32_t *cnt_c=new uint32_t[set_size]; assert(x_c!=nullptr && y_c!=nullptr && cnt_c!=nullptr); hipMemcpy(x_c,this->pnt_x.data ,num_pnt*sizeof(double) , hipMemcpyDeviceToHost); hipMemcpy(y_c,this->pnt_y.data ,num_pnt*sizeof(double) , hipMemcpyDeviceToHost); hipMemcpy(cnt_c,this->cnt.data ,set_size*sizeof(uint32_t) , hipMemcpyDeviceToHost); //test only the first subset_size pairs on CPUs int subset_size=100; double *dist_c=nullptr; hausdorff_test_sequential<double>(subset_size,x_c,y_c,cnt_c,dist_c); assert(dist_c!=nullptr); double *dist_h=new double[num_pair]; hipMemcpy(dist_h,dist.data ,num_pair*sizeof(double) , hipMemcpyDeviceToHost); //verify the CPU results are the same as the two GPU results int diff_cnt=0 ; for(int i=0;i<subset_size;i++) { for(int j=0;j<subset_size;j++) { int p1=i*subset_size+j; int p2=i*set_size+j; if(fabs(dist_c[p1]-dist_h[p2])>0.00001) { //std::cout<<"diff:("<<i<<","<<j<<") "<<dist_c[p1]<<" "<<dist_h[p2]<<std::endl; diff_cnt++; } } } if(diff_cnt==0) std::cout<<"GPU and CPU results are identical...................OK"<<std::endl; else std::cout<<"# of GPU and CPU diffs="<<diff_cnt<<std::endl; 
hipMemGetInfo(&this->free_mem, &this->total_mem); std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl; } #endif
a6f670d28ce8f1ad693049a0ab3e407678db70a6.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <time.h> #include <sys/time.h> #include <vector> #include <string> #include <iostream> #include <gtest/gtest.h> #include <utilities/error_utils.hpp> #include <cuspatial/soa_readers.hpp> #include <cuspatial/hausdorff.hpp> #include <utility/utility.hpp> #include "hausdorff_util.h" #include <tests/utilities/cudf_test_utils.cuh> #include <tests/utilities/cudf_test_fixtures.h> struct HausdorffCompare : public GdfTest { gdf_column pnt_x,pnt_y,cnt; size_t free_mem = 0, total_mem = 0; void set_initialize(const char *point_fn, const char *cnt_fn) { cudaMemGetInfo(&free_mem, &total_mem); std::cout<<"GPU total_mem="<<total_mem<<std::endl; std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl; struct timeval t0,t1; gettimeofday(&t0, nullptr); auto points=cuspatial::read_xy_points_soa(point_fn); pnt_x=points.first; pnt_y=points.second; cnt=cuspatial::read_uint32_soa(cnt_fn); gettimeofday(&t1, nullptr); float data_load_time=cuspatial::calc_time("point/cnt data loading time=", t0,t1); CUDF_EXPECTS(pnt_x.size>0 && pnt_y.size>0 && cnt.size>=0,"invalid # of points/trajectories"); CUDF_EXPECTS(pnt_x.size==pnt_y.size, "x and y columns must have the same size"); CUDF_EXPECTS(pnt_y.size >=cnt.size ,"a point set must have at least one point"); } }; #if 0 // disable until data files are available TEST_F(HausdorffCompare, hausdorfftest) { //currently using hard coded paths; to be updated 
std::string point_fn =std::string("/home/jianting/trajcode/locust256.coor"); std::string cnt_fn =std::string("/home/jianting/trajcode/locust256.objcnt"); //initializaiton this->set_initialize(point_fn.c_str(),cnt_fn.c_str()); //run cuspatial::directed_hausdorff_distance twice struct timeval t0,t1; gettimeofday(&t0, nullptr); gdf_column dist=cuspatial::directed_hausdorff_distance(this->pnt_x,this->pnt_y, this->cnt); gettimeofday(&t1, nullptr); float gpu_hausdorff_time=cuspatial::calc_time("GPU Hausdorff Distance time......",t0,t1); int set_size=this->cnt.size; int num_pair=dist.size; assert(num_pair==set_size*set_size); std::cout<<"num_pair="<<num_pair<<std::endl; //transfer data to CPU and run on CPU int num_pnt=this->pnt_x.size; double *x_c=new double[num_pnt]; double *y_c=new double[num_pnt]; uint32_t *cnt_c=new uint32_t[set_size]; assert(x_c!=nullptr && y_c!=nullptr && cnt_c!=nullptr); cudaMemcpy(x_c,this->pnt_x.data ,num_pnt*sizeof(double) , cudaMemcpyDeviceToHost); cudaMemcpy(y_c,this->pnt_y.data ,num_pnt*sizeof(double) , cudaMemcpyDeviceToHost); cudaMemcpy(cnt_c,this->cnt.data ,set_size*sizeof(uint32_t) , cudaMemcpyDeviceToHost); //test only the first subset_size pairs on CPUs int subset_size=100; double *dist_c=nullptr; hausdorff_test_sequential<double>(subset_size,x_c,y_c,cnt_c,dist_c); assert(dist_c!=nullptr); double *dist_h=new double[num_pair]; cudaMemcpy(dist_h,dist.data ,num_pair*sizeof(double) , cudaMemcpyDeviceToHost); //verify the CPU results are the same as the two GPU results int diff_cnt=0 ; for(int i=0;i<subset_size;i++) { for(int j=0;j<subset_size;j++) { int p1=i*subset_size+j; int p2=i*set_size+j; if(fabs(dist_c[p1]-dist_h[p2])>0.00001) { //std::cout<<"diff:("<<i<<","<<j<<") "<<dist_c[p1]<<" "<<dist_h[p2]<<std::endl; diff_cnt++; } } } if(diff_cnt==0) std::cout<<"GPU and CPU results are identical...................OK"<<std::endl; else std::cout<<"# of GPU and CPU diffs="<<diff_cnt<<std::endl; cudaMemGetInfo(&this->free_mem, &this->total_mem); 
std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl; } #endif
62b900cc8a0fa094dfaf24e903d332c124eca235.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> #define BDIMX 16 #define BDIMY 16 void initialData(float *in, const int size) { for (int i = 0; i < size; i++) { in[i] = (float)( rand() & 0xFF ) / 10.0f; //100.0f; } return; } void printData(float *in, const int size) { for (int i = 0; i < size; i++) { printf("%dth element: %f\n", i, in[i]); } return; } void checkResult(float *hostRef, float *gpuRef, const int size, int showme) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < size; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("different on %dth element: host %f gpu %f\n", i, hostRef[i], gpuRef[i]); break; } if (showme && i > size / 2 && i < size / 2 + 5) { // printf("%dth element: host %f gpu %f\n",i,hostRef[i],gpuRef[i]); } } if (!match) printf("Arrays do not match.\n\n"); } void transposeHost(float *out, float *in, const int nx, const int ny) { for( int iy = 0; iy < ny; ++iy) { for( int ix = 0; ix < nx; ++ix) { out[ix * ny + iy] = in[iy * nx + ix]; } } } __global__ void warmup(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } } // case 0 copy kernel: access data in rows __global__ void copyRow(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[iy * nx + ix]; } } // case 1 copy kernel: access data in columns __global__ void copyCol(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[ix * ny + iy] = in[ix * ny + iy]; } } // case 2 transpose kernel: read in 
rows and write in columns __global__ void transposeNaiveRow(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[ix * ny + iy] = in[iy * nx + ix]; } } __global__ void transposeNaiveCol(float *out, float *in, const int nx, const int ny) { unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[ix * ny + iy]; } } __global__ void transposeRow(float *out, float *in, const int nx, const int ny){ unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int row = gridDim.x * blockDim.x * iy + ix; if(row < ny){ int row_start = row * nx; int row_end = (row+1)*nx; int column_index = row; for(int i = row_start; i < row_end; i++){ out[column_index] = in[i]; column_index += nx; } } } __global__ void transposeUnroll4Row(float *out, float *in, const int nx, const int ny){ unsigned int ix = 4* blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int ti = iy * nx + ix; unsigned int to = ix * ny + iy; if (ix+3*blockDim.x < nx && iy < ny){ out[to] = in[ti]; out[to + blockDim.x*ny] = in[ti + blockDim.x]; out[to + 2*blockDim.x*ny] = in[ti + 2*blockDim.x]; out[to + 3*blockDim.x*ny] = in[ti + 3*blockDim.x]; } } __global__ void transposeUnroll8Row(float *out, float *in, const int nx, const int ny){ unsigned int ix = 8 * blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int ti = iy * nx + ix; unsigned int to = ix * ny + iy; if (ix+7*blockDim.x < nx && iy < ny){ out[to] = in[ti]; out[to + blockDim.x*ny] = in[ti + blockDim.x]; out[to + 2*blockDim.x*ny] = in[ti + 2*blockDim.x]; out[to + 3*blockDim.x*ny] = in[ti + 3*blockDim.x]; out[to + 4*blockDim.x*ny] = in[ti + 
4*blockDim.x]; out[to + 5*blockDim.x*ny] = in[ti + 5*blockDim.x]; out[to + 6*blockDim.x*ny] = in[ti + 6*blockDim.x]; out[to + 7*blockDim.x*ny] = in[ti + 7*blockDim.x]; } } __global__ void transposeUnroll4Col(float *out, float *in, const int nx, const int ny){ unsigned int ix = 4* blockDim.x * blockIdx.x + threadIdx.x; unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; unsigned int ti = iy*nx + ix; unsigned int to = ix*ny + iy; if(ix + 3*blockDim.x < nx && iy < ny){ out[ti] = in[to]; out[ti + blockDim.x] = in[to + blockDim.x*ny]; out[ti + 2*blockDim.x] = in[to + 2*blockDim.x*ny]; out[ti + 3*blockDim.x] = in[to + 3*blockDim.x*ny]; } } __global__ void transposeDiagonalRow(float *out, float *in, const int nx, const int ny) { unsigned int blk_y = blockIdx.x; unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x; unsigned int ix = blockDim.x * blk_x + threadIdx.x; unsigned int iy = blockDim.y * blk_y + threadIdx.y; if (ix < nx && iy < ny) { out[ix * ny + iy] = in[iy * nx + ix]; } } __global__ void transposeDiagonalCol(float *out, float *in, const int nx, const int ny) { unsigned int blk_y = blockIdx.x; unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x; unsigned int ix = blockDim.x * blk_x + threadIdx.x; unsigned int iy = blockDim.y * blk_y + threadIdx.y; if (ix < nx && iy < ny) { out[iy * nx + ix] = in[ix * ny + iy]; } } // main functions int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting transpose at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up array size 2048 int nx = 1 << 11; int ny = 1 << 11; // select a kernel and block size int iKernel = 0; int blockx = 16; int blocky = 16; if (argc > 1) iKernel = atoi(argv[1]); if (argc > 2) blockx = atoi(argv[2]); if (argc > 3) blocky = atoi(argv[3]); if (argc > 4) nx = atoi(argv[4]); if (argc > 5) ny = atoi(argv[5]); printf(" with matrix nx %d ny %d 
with kernel %d\n", nx, ny, iKernel); size_t nBytes = nx * ny * sizeof(float); // execution configuration dim3 block (blockx, blocky); dim3 grid ((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // allocate host memory float *h_A = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nx * ny); // transpose at host side transposeHost(hostRef, h_A, nx, ny); // allocate device memory float *d_A, *d_C; CHECK(hipMalloc((float**)&d_A, nBytes)); CHECK(hipMalloc((float**)&d_C, nBytes)); // copy data from host to device CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); // warmup to avoide startup overhead double iStart = cpuSecond(); hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_C, d_A, nx, ny); CHECK(hipDeviceSynchronize()); double iElaps = cpuSecond() - iStart; printf("warmup elapsed %f sec\n", iElaps); CHECK(hipGetLastError()); // kernel pointer and descriptor void (*kernel)(float *, float *, int, int); char *kernelName; // set up kernel switch (iKernel) { case 0: kernel = &copyRow; kernelName = "CopyRow "; break; case 1: kernel = &copyCol; kernelName = "CopyCol "; break; case 2: kernel = &transposeNaiveRow; kernelName = "NaiveRow "; break; case 3: kernel = &transposeNaiveCol; kernelName = "NaiveCol "; break; case 4: kernel = &transposeUnroll4Row; kernelName = "Unroll4Row "; grid.x = (nx + block.x * 4 - 1) / (block.x * 4); break; case 5: kernel = &transposeUnroll4Col; kernelName = "Unroll4Col "; grid.x = (nx + block.x * 4 - 1) / (block.x * 4); break; case 6: kernel = &transposeDiagonalRow; kernelName = "DiagonalRow "; break; case 7: kernel = &transposeDiagonalCol; kernelName = "DiagonalCol "; break; case 8: kernel = &transposeRow; kernelName = "transposeRow "; break; case 9: kernel = &transposeUnroll8Row; grid.x = (nx + block.x * 8 - 1) / (block.x * 8); kernelName = "transposeUnroll8Row "; break; } // run kernel iStart = cpuSecond(); 
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_C, d_A, nx, ny); CHECK(hipDeviceSynchronize()); iElaps = cpuSecond() - iStart; // calculate effective_bandwidth float ibnd = 2 * nx * ny * sizeof(float) / 1e9 / iElaps; printf("%s elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> effective " "bandwidth %f GB\n", kernelName, iElaps, grid.x, grid.y, block.x, block.y, ibnd); CHECK(hipGetLastError()); // check kernel results if (iKernel > 1) { CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nx * ny, 1); } // free host and device memory CHECK(hipFree(d_A)); CHECK(hipFree(d_C)); free(h_A); free(hostRef); free(gpuRef); // reset device CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
62b900cc8a0fa094dfaf24e903d332c124eca235.cu
// Matrix-transpose micro-benchmark (CUDA).
// Compares row/column copy kernels, naive transposes, loop-unrolled and
// diagonal-block-reordered transposes against a CPU reference, and reports
// the effective bandwidth of the kernel selected on the command line:
//   argv: [iKernel] [blockx] [blocky] [nx] [ny]
#include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // atoi, malloc/free, EXIT_SUCCESS (was missing)

#define BDIMX 16
#define BDIMY 16

// Fill `in` with `size` pseudo-random floats in [0.0, 25.5].
void initialData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        in[i] = (float)( rand() & 0xFF ) / 10.0f; //100.0f;
    }

    return;
}

// Debug helper: print every element of `in`.
void printData(float *in, const int size)
{
    for (int i = 0; i < size; i++)
    {
        printf("%dth element: %f\n", i, in[i]);
    }

    return;
}

// Compare host and GPU results element-wise with absolute tolerance 1e-8.
// `showme` gates a (currently disabled) debug print of a few mid-array
// elements.  Prints a diagnostic and stops at the first mismatch.
void checkResult(float *hostRef, float *gpuRef, const int size, int showme)
{
    double epsilon = 1.0E-8;
    bool match = 1;

    for (int i = 0; i < size; i++)
    {
        // was: abs(...) — integer abs truncates the float difference to an
        // int, so any mismatch smaller than 1.0 was silently missed.
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = 0;
            printf("different on %dth element: host %f gpu %f\n", i,
                   hostRef[i], gpuRef[i]);
            break;
        }

        if (showme && i > size / 2 && i < size / 2 + 5)
        {
            // printf("%dth element: host %f gpu %f\n",i,hostRef[i],gpuRef[i]);
        }
    }

    if (!match) printf("Arrays do not match.\n\n");
}

// CPU reference transpose: out (nx x ny) = in^T (in is ny x nx, row-major).
void transposeHost(float *out, float *in, const int nx, const int ny)
{
    for (int iy = 0; iy < ny; ++iy)
    {
        for (int ix = 0; ix < nx; ++ix)
        {
            out[ix * ny + iy] = in[iy * nx + ix];
        }
    }
}

// Warm-up kernel: straight element-wise copy, used once to absorb startup cost.
__global__ void warmup(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[iy * nx + ix] = in[iy * nx + ix];
    }
}

// case 0 copy kernel: access data in rows (coalesced load and store)
__global__ void copyRow(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[iy * nx + ix] = in[iy * nx + ix];
    }
}

// case 1 copy kernel: access data in columns (strided load and store)
__global__ void copyCol(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[ix * ny + iy] = in[ix * ny + iy];
    }
}

// case 2 transpose kernel: read in rows, write in columns
__global__ void transposeNaiveRow(float *out, float *in, const int nx,
                                  const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[ix * ny + iy] = in[iy * nx + ix];
    }
}

// case 3 transpose kernel: read in columns, write in rows
__global__ void transposeNaiveCol(float *out, float *in, const int nx,
                                  const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[iy * nx + ix] = in[ix * ny + iy];
    }
}

// case 8: one thread transposes an entire row of `in` into a column of `out`.
__global__ void transposeRow(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    // flat thread id == row index of `in`
    unsigned int row = gridDim.x * blockDim.x * iy + ix;

    if (row < ny)
    {
        int row_start    = row * nx;
        int row_end      = (row + 1) * nx;
        int column_index = row;

        for (int i = row_start; i < row_end; i++)
        {
            out[column_index] = in[i];
            // out is nx x ny, so consecutive elements of a column are ny
            // apart.  The original advanced by nx, which is only correct
            // for square matrices.
            column_index += ny;
        }
    }
}

// case 4: each thread transposes 4 elements spaced blockDim.x apart.
// NOTE: tail columns are dropped when nx is not a multiple of 4*blockDim.x
// (same guard as the original).
__global__ void transposeUnroll4Row(float *out, float *in, const int nx,
                                    const int ny)
{
    unsigned int ix = 4 * blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    unsigned int ti = iy * nx + ix;   // input offset  (row-major read)
    unsigned int to = ix * ny + iy;   // output offset (column write)

    if (ix + 3 * blockDim.x < nx && iy < ny)
    {
        out[to]                       = in[ti];
        out[to +     blockDim.x * ny] = in[ti +     blockDim.x];
        out[to + 2 * blockDim.x * ny] = in[ti + 2 * blockDim.x];
        out[to + 3 * blockDim.x * ny] = in[ti + 3 * blockDim.x];
    }
}

// case 9: 8-way unrolled variant of transposeUnroll4Row.
__global__ void transposeUnroll8Row(float *out, float *in, const int nx,
                                    const int ny)
{
    unsigned int ix = 8 * blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    unsigned int ti = iy * nx + ix;
    unsigned int to = ix * ny + iy;

    if (ix + 7 * blockDim.x < nx && iy < ny)
    {
        out[to]                       = in[ti];
        out[to +     blockDim.x * ny] = in[ti +     blockDim.x];
        out[to + 2 * blockDim.x * ny] = in[ti + 2 * blockDim.x];
        out[to + 3 * blockDim.x * ny] = in[ti + 3 * blockDim.x];
        out[to + 4 * blockDim.x * ny] = in[ti + 4 * blockDim.x];
        out[to + 5 * blockDim.x * ny] = in[ti + 5 * blockDim.x];
        out[to + 6 * blockDim.x * ny] = in[ti + 6 * blockDim.x];
        out[to + 7 * blockDim.x * ny] = in[ti + 7 * blockDim.x];
    }
}

// case 5: 4-way unrolled transpose reading columns, writing rows.
__global__ void transposeUnroll4Col(float *out, float *in, const int nx,
                                    const int ny)
{
    unsigned int ix = 4 * blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;

    unsigned int ti = iy * nx + ix;
    unsigned int to = ix * ny + iy;

    if (ix + 3 * blockDim.x < nx && iy < ny)
    {
        out[ti]                  = in[to];
        out[ti +     blockDim.x] = in[to +     blockDim.x * ny];
        out[ti + 2 * blockDim.x] = in[to + 2 * blockDim.x * ny];
        out[ti + 3 * blockDim.x] = in[to + 3 * blockDim.x * ny];
    }
}

// case 6: naive row transpose with diagonal block-coordinate reordering
// (spreads blocks across partitions to reduce partition camping).
__global__ void transposeDiagonalRow(float *out, float *in, const int nx,
                                     const int ny)
{
    unsigned int blk_y = blockIdx.x;
    unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x;

    unsigned int ix = blockDim.x * blk_x + threadIdx.x;
    unsigned int iy = blockDim.y * blk_y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[ix * ny + iy] = in[iy * nx + ix];
    }
}

// case 7: naive column transpose with diagonal block-coordinate reordering.
__global__ void transposeDiagonalCol(float *out, float *in, const int nx,
                                     const int ny)
{
    unsigned int blk_y = blockIdx.x;
    unsigned int blk_x = (blockIdx.x + blockIdx.y) % gridDim.x;

    unsigned int ix = blockDim.x * blk_x + threadIdx.x;
    unsigned int iy = blockDim.y * blk_y + threadIdx.y;

    if (ix < nx && iy < ny)
    {
        out[iy * nx + ix] = in[ix * ny + iy];
    }
}

// main: select a kernel, time it, and (for true transposes) verify against
// the CPU reference.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // set up array size 2048
    int nx = 1 << 11;
    int ny = 1 << 11;

    // select a kernel and block size
    int iKernel = 0;
    int blockx = 16;
    int blocky = 16;

    if (argc > 1) iKernel = atoi(argv[1]);
    if (argc > 2) blockx  = atoi(argv[2]);
    if (argc > 3) blocky  = atoi(argv[3]);
    if (argc > 4) nx      = atoi(argv[4]);
    if (argc > 5) ny      = atoi(argv[5]);

    printf(" with matrix nx %d ny %d with kernel %d\n", nx, ny, iKernel);
    size_t nBytes = nx * ny * sizeof(float);

    // execution configuration
    dim3 block(blockx, blocky);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

    // allocate host memory
    float *h_A     = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    // initialize host array
    initialData(h_A, nx * ny);

    // transpose at host side
    transposeHost(hostRef, h_A, nx, ny);

    // allocate device memory
    float *d_A, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // warmup to avoide startup overhead
    double iStart = cpuSecond();
    warmup<<<grid, block>>>(d_C, d_A, nx, ny);
    CHECK(cudaDeviceSynchronize());
    double iElaps = cpuSecond() - iStart;
    printf("warmup elapsed %f sec\n", iElaps);
    CHECK(cudaGetLastError());

    // kernel pointer and descriptor.  Initialized to the copy kernel so an
    // out-of-range iKernel can no longer leave them uninitialized (that was
    // undefined behavior at launch time).  kernelName is const char* because
    // it points at string literals.
    void (*kernel)(float *, float *, int, int) = &copyRow;
    const char *kernelName = "CopyRow ";

    // set up kernel
    switch (iKernel)
    {
    case 0:
        kernel = &copyRow;
        kernelName = "CopyRow ";
        break;

    case 1:
        kernel = &copyCol;
        kernelName = "CopyCol ";
        break;

    case 2:
        kernel = &transposeNaiveRow;
        kernelName = "NaiveRow ";
        break;

    case 3:
        kernel = &transposeNaiveCol;
        kernelName = "NaiveCol ";
        break;

    case 4:
        kernel = &transposeUnroll4Row;
        kernelName = "Unroll4Row ";
        grid.x = (nx + block.x * 4 - 1) / (block.x * 4);
        break;

    case 5:
        kernel = &transposeUnroll4Col;
        kernelName = "Unroll4Col ";
        grid.x = (nx + block.x * 4 - 1) / (block.x * 4);
        break;

    case 6:
        kernel = &transposeDiagonalRow;
        kernelName = "DiagonalRow ";
        break;

    case 7:
        kernel = &transposeDiagonalCol;
        kernelName = "DiagonalCol ";
        break;

    case 8:
        kernel = &transposeRow;
        kernelName = "transposeRow ";
        break;

    case 9:
        kernel = &transposeUnroll8Row;
        grid.x = (nx + block.x * 8 - 1) / (block.x * 8);
        kernelName = "transposeUnroll8Row ";
        break;

    default:
        // unknown selector: keep the copyRow default chosen above
        break;
    }

    // run kernel
    iStart = cpuSecond();
    kernel<<<grid, block>>>(d_C, d_A, nx, ny);
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;

    // calculate effective_bandwidth (one read + one write per element)
    float ibnd = 2 * nx * ny * sizeof(float) / 1e9 / iElaps;
    printf("%s elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> effective "
           "bandwidth %f GB\n", kernelName, iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    CHECK(cudaGetLastError());

    // check kernel results (copy kernels 0/1 are not transposes, skip them)
    if (iKernel > 1)
    {
        CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
        checkResult(hostRef, gpuRef, nx * ny, 1);
    }

    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
094812c2aa20b6099b5b38d0b99f38826cc72140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <helper_cuda.h> #include <omp.h> #include <rocblas.h> extern "C" { #include <cblas.h> #define FOR_i_TO_m for (i = 0; i < m; i++) #define FOR_j_TO_n for (j = 0; j < n; j++) #define FOR_l_TO_k for (l = 0; l < k; l++) #define RESET_C FOR_i_TO_m FOR_j_TO_n C[i * n + j] = 0; #define MIN(a,b) ((a) < (b) ? a : b) #define SIZE_A m*k*sizeof(double) #define SIZE_B k*n*sizeof(double) #define SIZE_C m*n*sizeof(double) //choice of the neighbor for matmult_gpu3() 1 = right || 2 = below #define NEIGHBOR 2 //number of elements per thread matmult_gpu4() #define T 12 //block size for matmult_gpu5() #define BLOCK_SIZE 16 //choose if you want the times to be printed #define PRINT_TIMES 0 void matmult_nat(int m,int n,int k,double *A,double *B,double *C); void matmult_lib(int m,int n,int k,double *A,double *B,double *C); void matmult_mkn(int m,int n,int k,double *A,double *B,double *C); void matmult_mnk(int m,int n,int k,double *A,double *B,double *C); void matmult_kmn(int m,int n,int k,double *A,double *B,double *C); void matmult_knm(int m,int n,int k,double *A,double *B,double *C); void matmult_nmk(int m,int n,int k,double *A,double *B,double *C); void matmult_nkm(int m,int n,int k,double *A,double *B,double *C); void matmult_blk(int m,int n,int k,double *A,double *B,double *C, int bs); void matmult_gpu1(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu2(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu3(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu4(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu5(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu6(int m,int n,int k,double *A,double *B,double *C); void matmult_gpulib(int m,int n,int k,double *A,double *B,double *C); } void matmult_nat(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C 
FOR_i_TO_m FOR_j_TO_n FOR_l_TO_k C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_lib(int m,int n,int k,double *A,double *B,double *C) { double time0 = omp_get_wtime(); cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,m,n,k,1,A,k,B,n,0,C,n); double time1 = omp_get_wtime(); if (PRINT_TIMES == 1) //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); printf("%d \t %3.6f\n", m, time1 - time0); } void matmult_mkn(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_i_TO_m FOR_l_TO_k FOR_j_TO_n C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_mnk(int m,int n,int k,double *A,double *B,double *C) { matmult_nat(m, n, k, A, B, C); } void matmult_kmn(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_l_TO_k FOR_i_TO_m FOR_j_TO_n C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_knm(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_l_TO_k FOR_j_TO_n FOR_i_TO_m C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_nmk(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_j_TO_n FOR_i_TO_m FOR_l_TO_k C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_nkm(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_j_TO_n FOR_l_TO_k FOR_i_TO_m C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_blk(int m,int n,int k,double *A,double *B,double *C, int bs) { int I, J, L, i, j, l, limi, limj, liml; RESET_C for (I = 0; I < m; I+=bs) { limi = MIN(I+bs,m); for (L = 0; L < k; L+=bs) { liml = MIN(L+bs,k); for (J = 0; J < n; J+=bs) { limj = MIN(J+bs,n); for (i = I; i < limi; i++) for (l = L; l < liml; l++) for (j = J; j < limj; j++) C[i * n + j] += A[i * k + l] * B[l * n + j]; }; }; }; } __global__ void gpu1(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_i_TO_m FOR_l_TO_k FOR_j_TO_n atomicAdd(&C[i * n + j] , A[i * k + l] * B[l * n + j]); } void matmult_gpu1(int m,int n,int 
k,double *A,double *B,double *C) { // The GPU uses only 1 thread double *d_A, *d_B, *d_C; // Allocate memory on the GPU hipMalloc((void**)&d_A, SIZE_A); hipMalloc((void**)&d_B, SIZE_B); hipMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device hipMemcpy(d_A, A, SIZE_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, SIZE_B, hipMemcpyHostToDevice); hipMemcpy(d_C, C, SIZE_C, hipMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch hipLaunchKernelGGL(( gpu1), dim3(1),dim3(1), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host hipMemcpy(C, d_C, SIZE_C, hipMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); //printf("time to run the program = %3.6f seconds\n", time2 - time1); //printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu2(int m,int n,int k,double *A,double *B,double *C) { int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res = 0.0; if(i < m && j < n){ FOR_l_TO_k res += A[i * k + l] * B[l * n + j]; C[i * n + j] = res; } } void matmult_gpu2(int m,int n,int k,double *A,double *B,double *C) { // We use one thread per element of C, which is m * n double *d_A, *d_B, *d_C; // Allocate memory on the GPU hipMalloc((void**)&d_A, SIZE_A); hipMalloc((void**)&d_B, SIZE_B); hipMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device hipMemcpy(d_A, A, SIZE_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, SIZE_B, hipMemcpyHostToDevice); hipMemcpy(d_C, C, SIZE_C, hipMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch hipSetDevice(1); int K 
= 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(Gx,Gy,1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D hipLaunchKernelGGL(( gpu2), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host hipMemcpy(C, d_C, SIZE_C, hipMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); if (PRINT_TIMES == 1){ /*printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); printf("time to run the program = %3.6f seconds\n", time2 - time1); printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); printf("total time = %3.6f seconds\n", time3 - time0);*/ printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu3b(int m,int n,int k,double *A,double *B,double *C) { // One thread computes 2 elements of C, that are vertical neighbors. int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res1 = 0.0, res2 = 0.0; if(2*i + 1 < m && j < n){ FOR_l_TO_k { res1 += A[2 *i * k + l] * B[l * n + j]; res2 += A[(2*i+1) * k + l] * B[l * n + j]; } C[2 * i * n + j] = res1; C[(2*i+1) * n + j] = res2; } else if(2*i + 1 == m && j < n){ FOR_l_TO_k res1 += A[2 *i * k + l] * B[l * n + j]; C[2 * i * n + j] = res1; } } __global__ void gpu3r(int m,int n,int k,double *A,double *B,double *C) { // One thread computes 2 elements of C, that are horizontal neighbors. // This kernel was used to compare timings of vertical and horizontal choices, but is the less efficient so gpu3b is preferred. 
int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res1 = 0.0, res2 = 0.0; if(i < m && 2*j + 1 < n){ FOR_l_TO_k { res1 += A[i * k + l] * B[l * n + 2 * j]; res2 += A[i * k + l] * B[l * n + 2*j+1]; } C[i * n + 2*j] = res1; C[i * n + 2*j+1] = res2; } else if(i < m && 2*j+1 == n){ FOR_l_TO_k res1 += A[i * k + l] * B[l * n + 2*j]; C[i * n + 2*j] = res1; } } void matmult_gpu3(int m,int n,int k,double *A,double *B,double *C) { // Each thread computes 2 elements of C double *d_A, *d_B, *d_C; // Allocate memory on the GPU hipMalloc((void**)&d_A, SIZE_A); hipMalloc((void**)&d_B, SIZE_B); hipMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device hipMemcpy(d_A, A, SIZE_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, SIZE_B, hipMemcpyHostToDevice); hipMemcpy(d_C, C, SIZE_C, hipMemcpyHostToDevice); double time1 = omp_get_wtime(); if(NEIGHBOR==1) { // Cuda launch hipSetDevice(1); int K = 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(ceil((double)Gx/2),Gy,1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D hipLaunchKernelGGL(( gpu3r), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); } else if(NEIGHBOR==2) { // Cuda launch hipSetDevice(1); int K = 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(Gx,ceil((double)Gy/2),1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D hipLaunchKernelGGL(( gpu3b), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); } double time2 = omp_get_wtime(); // Transfer data from device to host hipMemcpy(C, d_C, SIZE_C, hipMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); //printf("time to run the 
program = %3.6f seconds\n", time2 - time1); //printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu4(int m,int n,int k,double *A,double *B,double *C) { int l, s; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int S = m - i * T; double res[T]; if(T*(i + 1) - 1 < m && j < n){ for(s = 0; s < T; s++) res[s] = 0.0; FOR_l_TO_k { for(s = 0; s < T; s++) res[s] += A[(T *i + s) * k + l] * B[l * n + j]; } for(s = 0; s < T; s++) C[(T*i+s) * n + j] = res[s]; } else if(T*i < m && j < n){ for(s = 0; s < S ; s++) res[s] = 0.0; FOR_l_TO_k { for(s = 0; s < S ; s++) res[s] += A[(T *i + s) * k + l] * B[l * n + j]; } for(s = 0; s < S; s++) C[(T*i+s) * n + j] = res[s]; } } void matmult_gpu4(int m,int n,int k,double *A,double *B,double *C) { // Each thread computes T elements of C double *d_A, *d_B, *d_C; // Allocate memory on the GPU hipMalloc((void**)&d_A, SIZE_A); hipMalloc((void**)&d_B, SIZE_B); hipMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device hipMemcpy(d_A, A, SIZE_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, SIZE_B, hipMemcpyHostToDevice); hipMemcpy(d_C, C, SIZE_C, hipMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch hipSetDevice(1); int K = 32; int Gx = ceil((double) n / K); int Gy = ceil((double) m / K); dim3 dimGrid(Gx,ceil((double)Gy/T),1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D hipLaunchKernelGGL(( gpu4), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C); hipDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host hipMemcpy(C, d_C, SIZE_C, hipMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU hipFree(d_A); hipFree(d_B); hipFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f 
seconds\n", time1 - time0); //printf("time to run the program = %3.6f seconds\n", time2 - time1); //printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f \n", m, time2 - time1); } } // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; double* elements; } Matrix; // Get a matrix element __device__ double GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, double value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Thread block size // Forward declaration of the matrix multiplication kernel __global__ void gpu5(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void matmult_gpu5(int m,int n,int k,double *A,double *B,double *C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = k; d_A.height = m; size_t size = SIZE_A; hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = d_B.stride = n; d_B.height = k; size = SIZE_B; hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = n; d_C.height = m; size = SIZE_C; hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(n / 
dimBlock.x, m / dimBlock.y); hipLaunchKernelGGL(( gpu5), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipDeviceSynchronize(); // Read C from device memory hipMemcpy(C, d_C.elements, size, hipMemcpyDeviceToHost); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void gpu5(Matrix A, Matrix B, Matrix C) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue double Cvalue = 0.0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); } void matmult_gpulib(int m, int 
n, int k, double *A, double *B, double *C) { const double alpha = 1.0; const double beta = 0.0; double *d_A; double *d_B ; double *d_C; hipblasHandle_t handle; hipblasCreate(&handle); hipMalloc((void **) &d_A, m*k*sizeof(double)); hipMalloc((void **) &d_B, k*n*sizeof(double)); hipMalloc((void **) &d_C, m*n*sizeof(double)); hipMemcpy(d_A, A, m*k*sizeof(d_A), hipMemcpyHostToDevice); hipMemcpy(d_B, B, k*n*sizeof(d_B), hipMemcpyHostToDevice); hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, d_B, n, d_A, k, &beta, d_C, n); checkCudaErrors(hipDeviceSynchronize()); hipMemcpy(C, d_C, m*n*sizeof(C), hipMemcpyDeviceToHost); hipblasDestroy(handle); hipFree(d_A); hipFree(d_B); hipFree(d_C); } void matmult_gpu6(int m,int n,int k,double *A,double *B,double *C) { }
094812c2aa20b6099b5b38d0b99f38826cc72140.cu
#include <stdio.h> #include <stdlib.h> #include <helper_cuda.h> #include <omp.h> #include <cublas_v2.h> extern "C" { #include <cblas.h> #define FOR_i_TO_m for (i = 0; i < m; i++) #define FOR_j_TO_n for (j = 0; j < n; j++) #define FOR_l_TO_k for (l = 0; l < k; l++) #define RESET_C FOR_i_TO_m FOR_j_TO_n C[i * n + j] = 0; #define MIN(a,b) ((a) < (b) ? a : b) #define SIZE_A m*k*sizeof(double) #define SIZE_B k*n*sizeof(double) #define SIZE_C m*n*sizeof(double) //choice of the neighbor for matmult_gpu3() 1 = right || 2 = below #define NEIGHBOR 2 //number of elements per thread matmult_gpu4() #define T 12 //block size for matmult_gpu5() #define BLOCK_SIZE 16 //choose if you want the times to be printed #define PRINT_TIMES 0 void matmult_nat(int m,int n,int k,double *A,double *B,double *C); void matmult_lib(int m,int n,int k,double *A,double *B,double *C); void matmult_mkn(int m,int n,int k,double *A,double *B,double *C); void matmult_mnk(int m,int n,int k,double *A,double *B,double *C); void matmult_kmn(int m,int n,int k,double *A,double *B,double *C); void matmult_knm(int m,int n,int k,double *A,double *B,double *C); void matmult_nmk(int m,int n,int k,double *A,double *B,double *C); void matmult_nkm(int m,int n,int k,double *A,double *B,double *C); void matmult_blk(int m,int n,int k,double *A,double *B,double *C, int bs); void matmult_gpu1(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu2(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu3(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu4(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu5(int m,int n,int k,double *A,double *B,double *C); void matmult_gpu6(int m,int n,int k,double *A,double *B,double *C); void matmult_gpulib(int m,int n,int k,double *A,double *B,double *C); } void matmult_nat(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_i_TO_m FOR_j_TO_n FOR_l_TO_k C[i * n + j] += A[i * k + l] * B[l * n + j]; } void 
matmult_lib(int m,int n,int k,double *A,double *B,double *C) { double time0 = omp_get_wtime(); cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,m,n,k,1,A,k,B,n,0,C,n); double time1 = omp_get_wtime(); if (PRINT_TIMES == 1) //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); printf("%d \t %3.6f\n", m, time1 - time0); } void matmult_mkn(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_i_TO_m FOR_l_TO_k FOR_j_TO_n C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_mnk(int m,int n,int k,double *A,double *B,double *C) { matmult_nat(m, n, k, A, B, C); } void matmult_kmn(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_l_TO_k FOR_i_TO_m FOR_j_TO_n C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_knm(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_l_TO_k FOR_j_TO_n FOR_i_TO_m C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_nmk(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_j_TO_n FOR_i_TO_m FOR_l_TO_k C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_nkm(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_j_TO_n FOR_l_TO_k FOR_i_TO_m C[i * n + j] += A[i * k + l] * B[l * n + j]; } void matmult_blk(int m,int n,int k,double *A,double *B,double *C, int bs) { int I, J, L, i, j, l, limi, limj, liml; RESET_C for (I = 0; I < m; I+=bs) { limi = MIN(I+bs,m); for (L = 0; L < k; L+=bs) { liml = MIN(L+bs,k); for (J = 0; J < n; J+=bs) { limj = MIN(J+bs,n); for (i = I; i < limi; i++) for (l = L; l < liml; l++) for (j = J; j < limj; j++) C[i * n + j] += A[i * k + l] * B[l * n + j]; }; }; }; } __global__ void gpu1(int m,int n,int k,double *A,double *B,double *C) { int i, j, l; RESET_C FOR_i_TO_m FOR_l_TO_k FOR_j_TO_n atomicAdd(&C[i * n + j] , A[i * k + l] * B[l * n + j]); } void matmult_gpu1(int m,int n,int k,double *A,double *B,double *C) { // The GPU uses only 1 thread double *d_A, *d_B, *d_C; // 
Allocate memory on the GPU cudaMalloc((void**)&d_A, SIZE_A); cudaMalloc((void**)&d_B, SIZE_B); cudaMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device cudaMemcpy(d_A, A, SIZE_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, SIZE_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, SIZE_C, cudaMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch gpu1<<<1,1>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host cudaMemcpy(C, d_C, SIZE_C, cudaMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); //printf("time to run the program = %3.6f seconds\n", time2 - time1); //printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu2(int m,int n,int k,double *A,double *B,double *C) { int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res = 0.0; if(i < m && j < n){ FOR_l_TO_k res += A[i * k + l] * B[l * n + j]; C[i * n + j] = res; } } void matmult_gpu2(int m,int n,int k,double *A,double *B,double *C) { // We use one thread per element of C, which is m * n double *d_A, *d_B, *d_C; // Allocate memory on the GPU cudaMalloc((void**)&d_A, SIZE_A); cudaMalloc((void**)&d_B, SIZE_B); cudaMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device cudaMemcpy(d_A, A, SIZE_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, SIZE_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, SIZE_C, cudaMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch cudaSetDevice(1); int K = 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(Gx,Gy,1); // number of 
blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D gpu2<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host cudaMemcpy(C, d_C, SIZE_C, cudaMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); if (PRINT_TIMES == 1){ /*printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); printf("time to run the program = %3.6f seconds\n", time2 - time1); printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); printf("total time = %3.6f seconds\n", time3 - time0);*/ printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu3b(int m,int n,int k,double *A,double *B,double *C) { // One thread computes 2 elements of C, that are vertical neighbors. int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res1 = 0.0, res2 = 0.0; if(2*i + 1 < m && j < n){ FOR_l_TO_k { res1 += A[2 *i * k + l] * B[l * n + j]; res2 += A[(2*i+1) * k + l] * B[l * n + j]; } C[2 * i * n + j] = res1; C[(2*i+1) * n + j] = res2; } else if(2*i + 1 == m && j < n){ FOR_l_TO_k res1 += A[2 *i * k + l] * B[l * n + j]; C[2 * i * n + j] = res1; } } __global__ void gpu3r(int m,int n,int k,double *A,double *B,double *C) { // One thread computes 2 elements of C, that are horizontal neighbors. // This kernel was used to compare timings of vertical and horizontal choices, but is the less efficient so gpu3b is preferred. 
int l; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; double res1 = 0.0, res2 = 0.0; if(i < m && 2*j + 1 < n){ FOR_l_TO_k { res1 += A[i * k + l] * B[l * n + 2 * j]; res2 += A[i * k + l] * B[l * n + 2*j+1]; } C[i * n + 2*j] = res1; C[i * n + 2*j+1] = res2; } else if(i < m && 2*j+1 == n){ FOR_l_TO_k res1 += A[i * k + l] * B[l * n + 2*j]; C[i * n + 2*j] = res1; } } void matmult_gpu3(int m,int n,int k,double *A,double *B,double *C) { // Each thread computes 2 elements of C double *d_A, *d_B, *d_C; // Allocate memory on the GPU cudaMalloc((void**)&d_A, SIZE_A); cudaMalloc((void**)&d_B, SIZE_B); cudaMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device cudaMemcpy(d_A, A, SIZE_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, SIZE_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, SIZE_C, cudaMemcpyHostToDevice); double time1 = omp_get_wtime(); if(NEIGHBOR==1) { // Cuda launch cudaSetDevice(1); int K = 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(ceil((double)Gx/2),Gy,1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D gpu3r<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); } else if(NEIGHBOR==2) { // Cuda launch cudaSetDevice(1); int K = 32; int Gx = ceil((double)n / K); int Gy = ceil((double)m / K); dim3 dimGrid(Gx,ceil((double)Gy/2),1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D gpu3b<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); } double time2 = omp_get_wtime(); // Transfer data from device to host cudaMemcpy(C, d_C, SIZE_C, cudaMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); //printf("time to run the program = %3.6f seconds\n", time2 - time1); //printf("time to 
transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f\n", m, time2 - time1); } } __global__ void gpu4(int m,int n,int k,double *A,double *B,double *C) { int l, s; int j = threadIdx.x + blockIdx.x * blockDim.x; int i = threadIdx.y + blockIdx.y * blockDim.y; int S = m - i * T; double res[T]; if(T*(i + 1) - 1 < m && j < n){ for(s = 0; s < T; s++) res[s] = 0.0; FOR_l_TO_k { for(s = 0; s < T; s++) res[s] += A[(T *i + s) * k + l] * B[l * n + j]; } for(s = 0; s < T; s++) C[(T*i+s) * n + j] = res[s]; } else if(T*i < m && j < n){ for(s = 0; s < S ; s++) res[s] = 0.0; FOR_l_TO_k { for(s = 0; s < S ; s++) res[s] += A[(T *i + s) * k + l] * B[l * n + j]; } for(s = 0; s < S; s++) C[(T*i+s) * n + j] = res[s]; } } void matmult_gpu4(int m,int n,int k,double *A,double *B,double *C) { // Each thread computes T elements of C double *d_A, *d_B, *d_C; // Allocate memory on the GPU cudaMalloc((void**)&d_A, SIZE_A); cudaMalloc((void**)&d_B, SIZE_B); cudaMalloc((void**)&d_C, SIZE_C); double time0 = omp_get_wtime(); // Transfer data from host to device cudaMemcpy(d_A, A, SIZE_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, SIZE_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, SIZE_C, cudaMemcpyHostToDevice); double time1 = omp_get_wtime(); // Cuda launch cudaSetDevice(1); int K = 32; int Gx = ceil((double) n / K); int Gy = ceil((double) m / K); dim3 dimGrid(Gx,ceil((double)Gy/T),1); // number of blocks 2D dim3 dimBlock(K,K,1); // number of threads per block 2D gpu4<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); double time2 = omp_get_wtime(); // Transfer data from device to host cudaMemcpy(C, d_C, SIZE_C, cudaMemcpyDeviceToHost); double time3 = omp_get_wtime(); // Free the allocated memory on the GPU cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); if (PRINT_TIMES == 1){ //printf("time to transfer HtoD = %3.6f seconds\n", time1 - time0); //printf("time to run the program = %3.6f seconds\n", time2 - 
time1); //printf("time to transfer DtoH = %3.6f seconds\n", time3 - time2); //printf("total time = %3.6f seconds\n", time3 - time0); printf("%d \t %3.6f \n", m, time2 - time1); } } // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; double* elements; } Matrix; // Get a matrix element __device__ double GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, double value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Thread block size // Forward declaration of the matrix multiplication kernel __global__ void gpu5(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void matmult_gpu5(int m,int n,int k,double *A,double *B,double *C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = k; d_A.height = m; size_t size = SIZE_A; cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = d_B.stride = n; d_B.height = k; size = SIZE_B; cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = n; d_C.height = m; size = SIZE_C; cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(n / dimBlock.x, m / dimBlock.y); gpu5<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); 
cudaDeviceSynchronize(); // Read C from device memory cudaMemcpy(C, d_C.elements, size, cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void gpu5(Matrix A, Matrix B, Matrix C) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue double Cvalue = 0.0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); } void matmult_gpulib(int m, int n, int k, double *A, double *B, double *C) { const double alpha = 1.0; const double beta = 0.0; double 
*d_A; double *d_B ; double *d_C; cublasHandle_t handle; cublasCreate(&handle); cudaMalloc((void **) &d_A, m*k*sizeof(double)); cudaMalloc((void **) &d_B, k*n*sizeof(double)); cudaMalloc((void **) &d_C, m*n*sizeof(double)); cudaMemcpy(d_A, A, m*k*sizeof(d_A), cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, k*n*sizeof(d_B), cudaMemcpyHostToDevice); cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, d_B, n, d_A, k, &beta, d_C, n); checkCudaErrors(cudaDeviceSynchronize()); cudaMemcpy(C, d_C, m*n*sizeof(C), cudaMemcpyDeviceToHost); cublasDestroy(handle); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } void matmult_gpu6(int m,int n,int k,double *A,double *B,double *C) { }
5edd67bcb6c1ab550554c442e8bded031a1806a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hairLib.h" #include <fstream> #define M_PI 3.141516 #define rootDistribType 0 template<int type> __global__ void initHairs(float* X, float* Y, float*Z, float hxy, float hz); template<int type> __device__ float computeX(float hxy); template<> __global__ void initHairs<0>(float* X, float* Y, float*Z, float hxy, float hz) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; X[idx] = threadIdx.x * hxy; Y[idx] = line * hxy; Z[idx] = Zi * hz ; } template<> __device__ float computeX<1>(float hxy) { float hx = hxy * cos( - M_PI / 2.0 + ((float)blockIdx.x * M_PI )/ ((float)blockDim.x) ); return threadIdx.x * hx - (blockDim.x * hx)/ 2.0; } template<> __device__ float computeX<0>(float hxy) { return threadIdx.x * hxy; } template<> __global__ void initHairs<1>(float* X, float* Y, float*Z, float hxy, float hz) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; X[idx] = computeX<1>(hxy); Y[idx] = line * hxy; Z[idx] = Zi * hz ; } __global__ void applyGravity(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, float dt, float windActivity, float windX, float windY, float windZ) { const float gravity = 9.81; const float mass = 1e-2; const float massInv = 1e2; const float alpha = 1.5 * mass; // Rayleigh damping int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x; float Imass = -gravity * dt / massInv; float iWind = -50.0 * dt / massInv; float Idampx = - alpha * vx[idx] * dt; float Idampy = - alpha * vy[idx] * dt; float Idampz = - alpha * vz[idx] 
* dt; vx[idx] += (Idampx + iWind * windActivity * windX) / mass; vy[idx] += (Imass + Idampy + iWind * windActivity * windY) / mass; vz[idx] += (Idampz + iWind * windActivity * windZ) / mass; } __global__ void integrateK(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, float dt) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; // Integrate velocity X[idx] += vx[idx] * dt; Y[idx] += vy[idx] * dt; Z[idx] += vz[idx] * dt; } __device__ void computeConstraintImpulse(const float relX, const float relY, const float relZ, const float relvX, const float relvY, const float relvZ, const float dt, const float mass, const float massInv, const float refDist, float& changeX, float& changeY, float& changeZ) { float dist = sqrt( relX * relX + relY * relY + relZ * relZ); float dx = relX / dist; float dy = relY / dist; float dz = relZ / dist; float velProj = dx * relvX + dy * relvY + dz * relvZ; float gap = dist - refDist ; float constI = (gap / dt + velProj) / massInv ; changeX = constI * dx / mass * (dist != 0.); changeY = constI * dy / mass * (dist != 0.); changeZ = constI * dz / mass * (dist != 0.); } __device__ void computeBendingConstraintImpulse(const float relX, const float relY, const float relZ, const float relvX, const float relvY, const float relvZ, const float dt, const float mass, const float massInv, const float refDist, float& changeX, float& changeY, float& changeZ) { float dist = sqrt( relX * relX + relY * relY + relZ * relZ); float dx = relX / dist; float dy = relY / dist; float dz = relZ / dist; float velProj = dx * relvX + dy * relvY + dz * relvZ; float gap = dist - refDist ; float constI = (gap / dt + velProj) / massInv ; changeX = constI * dx / mass * (gap < 0.) * (dist != 0.); changeY = constI * dy / mass * (gap < 0.) * (dist != 0.); changeZ = constI * dz / mass * (gap < 0.) 
* (dist != 0.); } template<int type> __global__ void applyBothConstraint(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, int hairLenght, float hxy, float hz, float dt) { const float mass = 1e-2; const float massInv = 1e2; int line = blockIdx.x; int lineOffset = line * blockDim.x * blockDim.y; { float massPositionXc = 0.; float massPositionYc = 0.; float massPositionZc = 0.; float massVelocityXc = 0.; float massVelocityYc = 0.; float massVelocityZc = 0.; float massPositionXn = 0.; float massPositionYn = 0.; float massPositionZn = 0.; float massVelocityXn = 0.; float massVelocityYn = 0.; float massVelocityZn = 0.; float massPositionXn1 = 0.; float massPositionYn1 = 0.; float massPositionZn1 = 0.; float massVelocityXn1 = 0.; float massVelocityYn1 = 0.; float massVelocityZn1 = 0.; { int idxC = 0 + lineOffset + threadIdx.x ; massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; // handle hair root float relX = computeX<type>(hxy) - massPositionXc; float relY = line * hxy - massPositionYc; float relZ = - massPositionZc; float relvX = - massVelocityXc; float relvY = - massVelocityYc; float relvZ = - massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv, 0., changeX, changeY,changeZ); vx[idxC] += changeX; vy[idxC] += changeY; vz[idxC] += changeZ; massVelocityXc += changeX; massVelocityYc += changeY; massVelocityZc += changeZ; } { int idxC = gridDim.x * blockDim.x * blockDim.y + lineOffset + threadIdx.x ; massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; // handle hair root float relX = computeX<type>(hxy) - massPositionXc; float relY = line * hxy - massPositionYc; float relZ = hz - massPositionZc; float relvX = - massVelocityXc; float relvY = - massVelocityYc; float relvZ = 
- massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv, 0., changeX, changeY,changeZ); vx[idxC] += changeX; vy[idxC] += changeY; vz[idxC] += changeZ; massVelocityXc += changeX; massVelocityYc += changeY; massVelocityZc += changeZ; } for(int z = 0 ; z < hairLenght-2 ; ++z) { int ZoffC = z * (gridDim.x) * blockDim.x * blockDim.y; int ZoffN = (z+1) * (gridDim.x) * blockDim.x * blockDim.y; int ZoffN1 = (z+2) * (gridDim.x) * blockDim.x * blockDim.y; int idxC = ZoffC + lineOffset + threadIdx.x ; int idxN = ZoffN + lineOffset + threadIdx.x ; int idxN1 = ZoffN1 + lineOffset + threadIdx.x ; //Current particule massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; //Next particule massPositionXn = X[idxN]; massPositionYn = Y[idxN]; massPositionZn = Z[idxN]; massVelocityXn = vx[idxN]; massVelocityYn = vy[idxN]; massVelocityZn = vz[idxN]; //Following Next particule massPositionXn1 = X[idxN1]; massPositionYn1 = Y[idxN1]; massPositionZn1 = Z[idxN1]; massVelocityXn1 = vx[idxN1]; massVelocityYn1 = vy[idxN1]; massVelocityZn1 = vz[idxN1]; //////////////////////////////////////////// // Apply stretch constraint // //////////////////////////////////////////// float relX = massPositionXn - massPositionXc; float relY = massPositionYn - massPositionYc; float relZ = massPositionZn - massPositionZc; float relvX = massVelocityXn - massVelocityXc; float relvY = massVelocityYn - massVelocityYc; float relvZ = massVelocityZn - massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv + massInv * (z != 1), hz, changeX, changeY,changeZ); vx[idxC] += changeX * (z != 1); vy[idxC] += changeY * (z != 1); vz[idxC] += changeZ * (z != 1); vx[idxN] += -changeX; vy[idxN] += -changeY; vz[idxN] += -changeZ; //////////////////////////////////////////// // 
Apply bending constraint // //////////////////////////////////////////// relX = massPositionXn1 - massPositionXc; relY = massPositionYn1 - massPositionYc; relZ = massPositionZn1 - massPositionZc; relvX = massVelocityXn1 - massVelocityXc; relvY = massVelocityYn1 - massVelocityYc; relvZ = massVelocityZn1 - massVelocityZc; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv + massInv * (z != 1), 2*hz, changeX, changeY,changeZ); vx[idxC] += changeX * (z != 1); vy[idxC] += changeY * (z != 1); vz[idxC] += changeZ * (z != 1); vx[idxN1] += -changeX; vy[idxN1] += -changeY; vz[idxN1] += -changeZ; } } } HairSimulation::HairSimulation(float x, float y, float z, float radius, int gridX, int gridY, int hairLenght, float hxy, float hz) : x(x), y(y), z(z), radius(radius), hairLenght(hairLenght), d_x(NULL), d_y(NULL), d_z(NULL), hxy(hxy), hz(hz), gridX(gridX), gridY(gridY) { } HairSimulation::~HairSimulation() { if(d_x) hipFree(d_x); if(d_y) hipFree(d_y); if(d_z) hipFree(d_z); } void HairSimulation::initHair() { dim3 gridSize(gridY, hairLenght); dim3 blockSize(gridX, 1); int size = gridSize.x * gridSize.y * blockSize.x * blockSize.y; ///////////////////////////////////////// // Init Position // ///////////////////////////////////////// hipMalloc(&d_x, size * sizeof(float)); hipMalloc(&d_y, size * sizeof(float)); hipMalloc(&d_z, size * sizeof(float)); hipMalloc(&d_gap, size * sizeof(float)); hipMalloc(&d_velProj, size * sizeof(float)); hipMalloc(&d_dirX, size * sizeof(float)); hipMalloc(&d_dirY, size * sizeof(float)); hipMalloc(&d_dirZ, size * sizeof(float)); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); hipLaunchKernelGGL(( initHairs<rootDistribType>) , dim3(gridSize), dim3(blockSize) , 0, 0, d_x, d_y, d_z, hxy, hz); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); std::ofstream 
out("log.cudada"); out << "initHairs : " << time << std::endl; out.close(); X.resize(size); Y.resize(size); Z.resize(size); hipMemcpy(&(X[0]), d_x, size * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&(Y[0]), d_y, size * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&(Z[0]), d_z, size * sizeof(float), hipMemcpyDeviceToHost); ///////////////////////////////////////// // Init Velocity // ///////////////////////////////////////// hipMalloc(&d_vx, size * sizeof(float)); hipMalloc(&d_vy, size * sizeof(float)); hipMalloc(&d_vz, size * sizeof(float)); hipMemset(d_vx, 0, sizeof(float)); hipMemset(d_vy, 0, sizeof(float)); hipMemset(d_vz, 0, sizeof(float)); } float windImpulse(int t) { float x = (float)(t % 1024 - 512) / (float) 1024 * 15; //if(x > -15. && x < 15.0) { if(x == 0.) return 1.0; else { float ret = sin(x) / x; return ret; } } //else // return 0.; } void HairSimulation::integrate(float dt) { srand ( time(NULL) ); static int windCounter = 0; windCounter++; dim3 gridSize(gridY, hairLenght); dim3 blockSize(gridX, 1); int size = gridSize.x * gridSize.y * blockSize.x * blockSize.y; ///////////////////////////////////////// // Integrate Free Motion // ///////////////////////////////////////// hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); float windX = (float)rand() / (float)RAND_MAX ; float windY = (float)rand() / (float)RAND_MAX ; float windZ = fabs((float)rand() / (float)RAND_MAX ) ; float norm = sqrt(windX*windX + windY*windY + windZ*windZ); windX /= norm; windY /= norm; windZ /= norm; hipLaunchKernelGGL(( applyGravity), dim3(gridSize), dim3(blockSize) , 0, 0, d_x, d_y, d_z, d_vx, d_vy, d_vz, dt, windImpulse(windCounter), windX, windY, windZ); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); std::ofstream out("log.cudada", std::ios::app); out << "gravity : " << time << std::endl; for(int nbIter = 
0 ; nbIter < 5 ; ++ nbIter) { ///////////////////////////////////////// // Integrate Constrained Motion // ///////////////////////////////////////// dim3 gridSize2(gridX, 1); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); hipLaunchKernelGGL(( applyBothConstraint<rootDistribType>), dim3(gridSize2), dim3(blockSize) , 0, 0, d_x, d_y, d_z, d_vx, d_vy, d_vz, hairLenght, hxy, hz, dt); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); out << hipGetErrorString(hipGetLastError()) << std::endl; out << "constraint : " << time << std::endl; ///////////////////////////////////////////////// // Integrate Bending Constrained Motion // ///////////////////////////////////////////////// hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); //applyBendingConstraint<<<gridSize2, blockSize >>>(d_x, d_y, d_z, // d_vx, d_vy, d_vz, // hairLenght, // hxy, // hz, // dt); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); out << hipGetErrorString(hipGetLastError()) << std::endl; out << "bending constraint : " << time << std::endl; } ///////////////////////////////////////// // Time integration // ///////////////////////////////////////// hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); hipLaunchKernelGGL(( integrateK), dim3(gridSize), dim3(blockSize) , 0, 0, d_x, d_y, d_z, d_vx, d_vy, d_vz, dt); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); out << "integrate : " << time << std::endl; out.close(); ///////////////////////////////////////// // Transfter data // ///////////////////////////////////////// hipMemcpy(&(X[0]), d_x, size * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&(Y[0]), d_y, size * sizeof(float), 
hipMemcpyDeviceToHost); hipMemcpy(&(Z[0]), d_z, size * sizeof(float), hipMemcpyDeviceToHost); } const std::vector<float>& HairSimulation::getMassPositionX() { return X; } const std::vector<float>& HairSimulation::getMassPositionY() { return Y; } const std::vector<float>& HairSimulation::getMassPositionZ() { return Z; }
5edd67bcb6c1ab550554c442e8bded031a1806a7.cu
#include "hairLib.h" #include <fstream> #define M_PI 3.141516 #define rootDistribType 0 template<int type> __global__ void initHairs(float* X, float* Y, float*Z, float hxy, float hz); template<int type> __device__ float computeX(float hxy); template<> __global__ void initHairs<0>(float* X, float* Y, float*Z, float hxy, float hz) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; X[idx] = threadIdx.x * hxy; Y[idx] = line * hxy; Z[idx] = Zi * hz ; } template<> __device__ float computeX<1>(float hxy) { float hx = hxy * cos( - M_PI / 2.0 + ((float)blockIdx.x * M_PI )/ ((float)blockDim.x) ); return threadIdx.x * hx - (blockDim.x * hx)/ 2.0; } template<> __device__ float computeX<0>(float hxy) { return threadIdx.x * hxy; } template<> __global__ void initHairs<1>(float* X, float* Y, float*Z, float hxy, float hz) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; X[idx] = computeX<1>(hxy); Y[idx] = line * hxy; Z[idx] = Zi * hz ; } __global__ void applyGravity(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, float dt, float windActivity, float windX, float windY, float windZ) { const float gravity = 9.81; const float mass = 1e-2; const float massInv = 1e2; const float alpha = 1.5 * mass; // Rayleigh damping int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x; float Imass = -gravity * dt / massInv; float iWind = -50.0 * dt / massInv; float Idampx = - alpha * vx[idx] * dt; float Idampy = - alpha * vy[idx] * dt; float Idampz = - alpha * vz[idx] * dt; vx[idx] += (Idampx + iWind * windActivity * windX) / mass; vy[idx] += (Imass + 
Idampy + iWind * windActivity * windY) / mass; vz[idx] += (Idampz + iWind * windActivity * windZ) / mass; } __global__ void integrateK(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, float dt) { int line = blockIdx.x; int Zi = blockIdx.y; int lineOffset = line * blockDim.x * blockDim.y; int Zoffset = Zi * (gridDim.x) * blockDim.x * blockDim.y; int idx = Zoffset + lineOffset + threadIdx.x ; // Integrate velocity X[idx] += vx[idx] * dt; Y[idx] += vy[idx] * dt; Z[idx] += vz[idx] * dt; } __device__ void computeConstraintImpulse(const float relX, const float relY, const float relZ, const float relvX, const float relvY, const float relvZ, const float dt, const float mass, const float massInv, const float refDist, float& changeX, float& changeY, float& changeZ) { float dist = sqrt( relX * relX + relY * relY + relZ * relZ); float dx = relX / dist; float dy = relY / dist; float dz = relZ / dist; float velProj = dx * relvX + dy * relvY + dz * relvZ; float gap = dist - refDist ; float constI = (gap / dt + velProj) / massInv ; changeX = constI * dx / mass * (dist != 0.); changeY = constI * dy / mass * (dist != 0.); changeZ = constI * dz / mass * (dist != 0.); } __device__ void computeBendingConstraintImpulse(const float relX, const float relY, const float relZ, const float relvX, const float relvY, const float relvZ, const float dt, const float mass, const float massInv, const float refDist, float& changeX, float& changeY, float& changeZ) { float dist = sqrt( relX * relX + relY * relY + relZ * relZ); float dx = relX / dist; float dy = relY / dist; float dz = relZ / dist; float velProj = dx * relvX + dy * relvY + dz * relvZ; float gap = dist - refDist ; float constI = (gap / dt + velProj) / massInv ; changeX = constI * dx / mass * (gap < 0.) * (dist != 0.); changeY = constI * dy / mass * (gap < 0.) * (dist != 0.); changeZ = constI * dz / mass * (gap < 0.) 
* (dist != 0.); } template<int type> __global__ void applyBothConstraint(float* X, float* Y, float*Z, float* vx, float* vy, float* vz, int hairLenght, float hxy, float hz, float dt) { const float mass = 1e-2; const float massInv = 1e2; int line = blockIdx.x; int lineOffset = line * blockDim.x * blockDim.y; { float massPositionXc = 0.; float massPositionYc = 0.; float massPositionZc = 0.; float massVelocityXc = 0.; float massVelocityYc = 0.; float massVelocityZc = 0.; float massPositionXn = 0.; float massPositionYn = 0.; float massPositionZn = 0.; float massVelocityXn = 0.; float massVelocityYn = 0.; float massVelocityZn = 0.; float massPositionXn1 = 0.; float massPositionYn1 = 0.; float massPositionZn1 = 0.; float massVelocityXn1 = 0.; float massVelocityYn1 = 0.; float massVelocityZn1 = 0.; { int idxC = 0 + lineOffset + threadIdx.x ; massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; // handle hair root float relX = computeX<type>(hxy) - massPositionXc; float relY = line * hxy - massPositionYc; float relZ = - massPositionZc; float relvX = - massVelocityXc; float relvY = - massVelocityYc; float relvZ = - massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv, 0., changeX, changeY,changeZ); vx[idxC] += changeX; vy[idxC] += changeY; vz[idxC] += changeZ; massVelocityXc += changeX; massVelocityYc += changeY; massVelocityZc += changeZ; } { int idxC = gridDim.x * blockDim.x * blockDim.y + lineOffset + threadIdx.x ; massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; // handle hair root float relX = computeX<type>(hxy) - massPositionXc; float relY = line * hxy - massPositionYc; float relZ = hz - massPositionZc; float relvX = - massVelocityXc; float relvY = - massVelocityYc; float relvZ = 
- massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv, 0., changeX, changeY,changeZ); vx[idxC] += changeX; vy[idxC] += changeY; vz[idxC] += changeZ; massVelocityXc += changeX; massVelocityYc += changeY; massVelocityZc += changeZ; } for(int z = 0 ; z < hairLenght-2 ; ++z) { int ZoffC = z * (gridDim.x) * blockDim.x * blockDim.y; int ZoffN = (z+1) * (gridDim.x) * blockDim.x * blockDim.y; int ZoffN1 = (z+2) * (gridDim.x) * blockDim.x * blockDim.y; int idxC = ZoffC + lineOffset + threadIdx.x ; int idxN = ZoffN + lineOffset + threadIdx.x ; int idxN1 = ZoffN1 + lineOffset + threadIdx.x ; //Current particule massPositionXc = X[idxC]; massPositionYc = Y[idxC]; massPositionZc = Z[idxC]; massVelocityXc = vx[idxC]; massVelocityYc = vy[idxC]; massVelocityZc = vz[idxC]; //Next particule massPositionXn = X[idxN]; massPositionYn = Y[idxN]; massPositionZn = Z[idxN]; massVelocityXn = vx[idxN]; massVelocityYn = vy[idxN]; massVelocityZn = vz[idxN]; //Following Next particule massPositionXn1 = X[idxN1]; massPositionYn1 = Y[idxN1]; massPositionZn1 = Z[idxN1]; massVelocityXn1 = vx[idxN1]; massVelocityYn1 = vy[idxN1]; massVelocityZn1 = vz[idxN1]; //////////////////////////////////////////// // Apply stretch constraint // //////////////////////////////////////////// float relX = massPositionXn - massPositionXc; float relY = massPositionYn - massPositionYc; float relZ = massPositionZn - massPositionZc; float relvX = massVelocityXn - massVelocityXc; float relvY = massVelocityYn - massVelocityYc; float relvZ = massVelocityZn - massVelocityZc; float changeX, changeY, changeZ; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv + massInv * (z != 1), hz, changeX, changeY,changeZ); vx[idxC] += changeX * (z != 1); vy[idxC] += changeY * (z != 1); vz[idxC] += changeZ * (z != 1); vx[idxN] += -changeX; vy[idxN] += -changeY; vz[idxN] += -changeZ; //////////////////////////////////////////// // 
Apply bending constraint // //////////////////////////////////////////// relX = massPositionXn1 - massPositionXc; relY = massPositionYn1 - massPositionYc; relZ = massPositionZn1 - massPositionZc; relvX = massVelocityXn1 - massVelocityXc; relvY = massVelocityYn1 - massVelocityYc; relvZ = massVelocityZn1 - massVelocityZc; computeConstraintImpulse(relX, relY, relZ, relvX, relvY, relvZ, dt, mass, massInv + massInv * (z != 1), 2*hz, changeX, changeY,changeZ); vx[idxC] += changeX * (z != 1); vy[idxC] += changeY * (z != 1); vz[idxC] += changeZ * (z != 1); vx[idxN1] += -changeX; vy[idxN1] += -changeY; vz[idxN1] += -changeZ; } } } HairSimulation::HairSimulation(float x, float y, float z, float radius, int gridX, int gridY, int hairLenght, float hxy, float hz) : x(x), y(y), z(z), radius(radius), hairLenght(hairLenght), d_x(NULL), d_y(NULL), d_z(NULL), hxy(hxy), hz(hz), gridX(gridX), gridY(gridY) { } HairSimulation::~HairSimulation() { if(d_x) cudaFree(d_x); if(d_y) cudaFree(d_y); if(d_z) cudaFree(d_z); } void HairSimulation::initHair() { dim3 gridSize(gridY, hairLenght); dim3 blockSize(gridX, 1); int size = gridSize.x * gridSize.y * blockSize.x * blockSize.y; ///////////////////////////////////////// // Init Position // ///////////////////////////////////////// cudaMalloc(&d_x, size * sizeof(float)); cudaMalloc(&d_y, size * sizeof(float)); cudaMalloc(&d_z, size * sizeof(float)); cudaMalloc(&d_gap, size * sizeof(float)); cudaMalloc(&d_velProj, size * sizeof(float)); cudaMalloc(&d_dirX, size * sizeof(float)); cudaMalloc(&d_dirY, size * sizeof(float)); cudaMalloc(&d_dirZ, size * sizeof(float)); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); initHairs<rootDistribType> <<<gridSize, blockSize >>>(d_x, d_y, d_z, hxy, hz); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); std::ofstream out("log.cudada"); out << 
"initHairs : " << time << std::endl; out.close(); X.resize(size); Y.resize(size); Z.resize(size); cudaMemcpy(&(X[0]), d_x, size * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&(Y[0]), d_y, size * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&(Z[0]), d_z, size * sizeof(float), cudaMemcpyDeviceToHost); ///////////////////////////////////////// // Init Velocity // ///////////////////////////////////////// cudaMalloc(&d_vx, size * sizeof(float)); cudaMalloc(&d_vy, size * sizeof(float)); cudaMalloc(&d_vz, size * sizeof(float)); cudaMemset(d_vx, 0, sizeof(float)); cudaMemset(d_vy, 0, sizeof(float)); cudaMemset(d_vz, 0, sizeof(float)); } float windImpulse(int t) { float x = (float)(t % 1024 - 512) / (float) 1024 * 15; //if(x > -15. && x < 15.0) { if(x == 0.) return 1.0; else { float ret = sin(x) / x; return ret; } } //else // return 0.; } void HairSimulation::integrate(float dt) { srand ( time(NULL) ); static int windCounter = 0; windCounter++; dim3 gridSize(gridY, hairLenght); dim3 blockSize(gridX, 1); int size = gridSize.x * gridSize.y * blockSize.x * blockSize.y; ///////////////////////////////////////// // Integrate Free Motion // ///////////////////////////////////////// cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); float windX = (float)rand() / (float)RAND_MAX ; float windY = (float)rand() / (float)RAND_MAX ; float windZ = fabs((float)rand() / (float)RAND_MAX ) ; float norm = sqrt(windX*windX + windY*windY + windZ*windZ); windX /= norm; windY /= norm; windZ /= norm; applyGravity<<<gridSize, blockSize >>>(d_x, d_y, d_z, d_vx, d_vy, d_vz, dt, windImpulse(windCounter), windX, windY, windZ); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); std::ofstream out("log.cudada", std::ios::app); out << "gravity : " << time << std::endl; for(int nbIter = 0 ; nbIter < 5 ; ++ nbIter) { 
///////////////////////////////////////// // Integrate Constrained Motion // ///////////////////////////////////////// dim3 gridSize2(gridX, 1); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); applyBothConstraint<rootDistribType><<<gridSize2, blockSize >>>(d_x, d_y, d_z, d_vx, d_vy, d_vz, hairLenght, hxy, hz, dt); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); out << cudaGetErrorString(cudaGetLastError()) << std::endl; out << "constraint : " << time << std::endl; ///////////////////////////////////////////////// // Integrate Bending Constrained Motion // ///////////////////////////////////////////////// cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); //applyBendingConstraint<<<gridSize2, blockSize >>>(d_x, d_y, d_z, // d_vx, d_vy, d_vz, // hairLenght, // hxy, // hz, // dt); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); out << cudaGetErrorString(cudaGetLastError()) << std::endl; out << "bending constraint : " << time << std::endl; } ///////////////////////////////////////// // Time integration // ///////////////////////////////////////// cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); integrateK<<<gridSize, blockSize >>>(d_x, d_y, d_z, d_vx, d_vy, d_vz, dt); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); out << "integrate : " << time << std::endl; out.close(); ///////////////////////////////////////// // Transfter data // ///////////////////////////////////////// cudaMemcpy(&(X[0]), d_x, size * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&(Y[0]), d_y, size * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&(Z[0]), d_z, size * sizeof(float), 
cudaMemcpyDeviceToHost); } const std::vector<float>& HairSimulation::getMassPositionX() { return X; } const std::vector<float>& HairSimulation::getMassPositionY() { return Y; } const std::vector<float>& HairSimulation::getMassPositionZ() { return Z; }
3d396deb0b39e23b3b8e3f7802e42f7fb4c0ee9f.hip
// !!! This is a file automatically generated by hipify!!! #ifndef MEANSHIFT_CU #define MEANSHIFT_CU #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime_api.h" #include "vector_functions.hpp" #include "hip/hip_vector_types.h" #include "helper_math.h" #include "hip/device_functions.h" #include "commonDefines.h" #define MYASSERT(condition, ERROR) if (!(condition)) { printf("ERROR: %s \n", ERROR); return; } #define rev_sqrt_two_pi 0.3989422804 #define rev_two_pi rev_sqrt_two_pi*rev_sqrt_two_pi __device__ __host__ float gaussian_kernel(float dist2, float bandwidth) { const float rev_bandwidth = 1. / bandwidth; const float d2_frac_b2 = dist2 * rev_bandwidth * rev_bandwidth; float div = 1. / rev_two_pi * rev_bandwidth; float exp_ = div * expf(-0.5 * d2_frac_b2); return exp_; } __global__ void cuda_MeanShift_SharedMemory_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { extern __shared__ float tile[TILE_WIDTH][2]; // for each pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; for (int tile_i = 0; tile_i < (N - 1) / TILE_WIDTH + 1; ++tile_i) { //loading phase - each thread load something into shared memory int row_t = tile_i * TILE_WIDTH + tx; int index = row_t * dim; if (row_t < N) { tile[tx][0] = originalPoints[index]; tile[tx][1] = originalPoints[index + 1]; } else { tile[tx][0] = 0.0; tile[tx][1] = 0.0; } __syncthreads(); //end of loading into shared memory if (row < N) // only the threads inside the bounds do some computation { float2 x_i = make_float2(I[it], I[it + 1]); //load input point //computing phase for (int j = 0; j < TILE_WIDTH; ++j) { float2 x_j = make_float2(tile[j][0], tile[j][1]); //from shared memory float2 sub = x_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating 
denominator += weight; } } __syncthreads(); //end of computing phase for tile_ij } if (row < N) { //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_sharedMemory_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_SharedMemory_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } __global__ void cuda_MeanShift_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { // for every pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; float2 y_i; if (row < N) { y_i = make_float2(I[it], I[it + 1]); //load input point //computing mean shift for (int j = 0; j < N; ++j) { float2 x_j = make_float2(originalPoints[j * dim], originalPoints[j * dim + 1]); //from central gpu memory float2 sub = y_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } #endif // !MEANSHIFT_CU
3d396deb0b39e23b3b8e3f7802e42f7fb4c0ee9f.cu
#ifndef MEANSHIFT_CU #define MEANSHIFT_CU #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_runtime_api.h" #include "vector_functions.hpp" #include "vector_types.h" #include "helper_math.h" #include "device_functions.h" #include "commonDefines.h" #define MYASSERT(condition, ERROR) if (!(condition)) { printf("ERROR: %s \n", ERROR); return; } #define rev_sqrt_two_pi 0.3989422804 #define rev_two_pi rev_sqrt_two_pi*rev_sqrt_two_pi __device__ __host__ float gaussian_kernel(float dist2, float bandwidth) { const float rev_bandwidth = 1. / bandwidth; const float d2_frac_b2 = dist2 * rev_bandwidth * rev_bandwidth; float div = 1. / rev_two_pi * rev_bandwidth; float exp_ = div * expf(-0.5 * d2_frac_b2); return exp_; } __global__ void cuda_MeanShift_SharedMemory_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { extern __shared__ float tile[TILE_WIDTH][2]; // for each pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; for (int tile_i = 0; tile_i < (N - 1) / TILE_WIDTH + 1; ++tile_i) { //loading phase - each thread load something into shared memory int row_t = tile_i * TILE_WIDTH + tx; int index = row_t * dim; if (row_t < N) { tile[tx][0] = originalPoints[index]; tile[tx][1] = originalPoints[index + 1]; } else { tile[tx][0] = 0.0; tile[tx][1] = 0.0; } __syncthreads(); //end of loading into shared memory if (row < N) // only the threads inside the bounds do some computation { float2 x_i = make_float2(I[it], I[it + 1]); //load input point //computing phase for (int j = 0; j < TILE_WIDTH; ++j) { float2 x_j = make_float2(tile[j][0], tile[j][1]); //from shared memory float2 sub = x_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } } __syncthreads(); //end of computing phase for tile_ij } if 
(row < N) { //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_sharedMemory_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_SharedMemory_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } __global__ void cuda_MeanShift_2D(float* X, const float* I, const float* originalPoints, const int N, const int dim) { // for every pixel int tx = threadIdx.x; int row = blockIdx.x * blockDim.x + tx; float2 numerator = make_float2(0.0, 0.0); float denominator = 0.0; int it = row * dim; float2 y_i; if (row < N) { y_i = make_float2(I[it], I[it + 1]); //load input point //computing mean shift for (int j = 0; j < N; ++j) { float2 x_j = make_float2(originalPoints[j * dim], originalPoints[j * dim + 1]); //from central gpu memory float2 sub = y_i - x_j; float distance2 = dot(sub, sub); float weight = gaussian_kernel(distance2, BW); numerator += x_j * weight; //accumulating denominator += weight; } //storing numerator /= denominator; X[it] = numerator.x; X[it + 1] = numerator.y; } } extern "C" void cudaMeanShift_2D_wrapper(float* X, const float* I, const float* originalPoints, const int N, const int vecDim, dim3 gridDim, dim3 blockDim) { cuda_MeanShift_2D << <gridDim, blockDim >> > (X, I, originalPoints, N, vecDim); } #endif // !MEANSHIFT_CU
f5f8d8921f6e3e3117447a93ee446b3752002aee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { constexpr unsigned int init_threads_per_group = 32; constexpr unsigned int init_groups_per_block = 4; constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block; __global__ void __launch_bounds__(init_threads_per_block) gpu_init_statistics_groups(statistics_group* groups, const stats_column_desc* cols, device_2dspan<rowgroup_rows const> rowgroup_bounds) { __shared__ __align__(4) statistics_group group_g[init_groups_per_block]; uint32_t const col_id = blockIdx.y; uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y; uint32_t const t = threadIdx.x; auto const num_rowgroups = rowgroup_bounds.size().first; statistics_group* group = &group_g[threadIdx.y]; if (chunk_id < num_rowgroups and t == 0) { group->col = &cols[col_id]; group->start_row = rowgroup_bounds[chunk_id][col_id].begin; group->num_rows = rowgroup_bounds[chunk_id][col_id].size(); groups[col_id * num_rowgroups + chunk_id] = *group; } } /** * @brief Get the buffer size and offsets of encoded statistics * * @param[in,out] groups Statistics merge groups * @param[in] statistics_count Number 
of statistics buffers */ constexpr unsigned int buffersize_reduction_dim = 32; constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim; constexpr unsigned int pb_fld_hdrlen = 1; constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length constexpr unsigned int pb_fldlen_int64 = 10; constexpr unsigned int pb_fldlen_float64 = 8; constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64; constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64; template <unsigned int block_size> __global__ void __launch_bounds__(block_size, 1) gpu_init_statistics_buffersize(statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count) { using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ typename block_scan::TempStorage temp_storage; volatile uint32_t stats_size = 0; uint32_t t = threadIdx.x; __syncthreads(); for (uint32_t start = 0; start < statistics_count; start += block_size) { uint32_t stats_len = 0, stats_pos; uint32_t idx = start + t; if (idx < statistics_count) { statistics_dtype const dtype = groups[idx].stats_dtype; switch (dtype) { case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break; case dtype_int8: case dtype_int16: case dtype_int32: case dtype_date32: case dtype_int64: case dtype_timestamp64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64); break; case dtype_float32: case dtype_float64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64); break; case dtype_decimal64: case dtype_decimal128: stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal); break; case dtype_string: stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + 
pb_fldlen_int64) + chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length; break; case dtype_none: stats_len = pb_fldlen_common; default: break; } } uint32_t tmp_stats_size; block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size); stats_pos += stats_size; stats_size += tmp_stats_size; if (idx < statistics_count) { groups[idx].start_chunk = stats_pos; groups[idx].num_chunks = stats_len; } __syncthreads(); } } struct stats_state_s { uint8_t* base; ///< Output buffer start uint8_t* end; ///< Output buffer end statistics_chunk chunk; statistics_merge_group group; statistics_dtype stats_dtype; //!< Statistics data type for this column // ORC stats uint64_t numberOfValues; uint8_t hasNull; }; /* * Protobuf encoding - see * https://developers.google.com/protocol-buffers/docs/encoding */ // Protobuf varint encoding for unsigned int __device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v) { while (v > 0x7f) { *p++ = ((uint32_t)v | 0x80); v >>= 7; } *p++ = v; return p; } // Protobuf field encoding for unsigned int __device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v) { p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16 return pb_encode_uint(p + 1, v); } // Protobuf field encoding for signed int __device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v) { int64_t s = (v < 0); return pb_put_uint(p, id, (v ^ -s) * 2 + s); } // Protobuf field encoding for 'packed' unsigned int (single value) __device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v) { uint8_t* p2 = pb_encode_uint(p + 2, v); p[0] = id * 8 + ProtofType::FIXEDLEN; p[1] = static_cast<uint8_t>(p2 - (p + 2)); return p2; } // Protobuf field encoding for binary/string __device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len) { p[0] = id * 8 + ProtofType::FIXEDLEN; p = pb_encode_uint(p + 1, len); memcpy(p, bytes, len); return p + len; } 
// Protobuf field encoding for 64-bit raw encoding (double) __device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64) { p[0] = id * 8 + ProtofType::FIXED64; memcpy(p + 1, raw64, 8); return p + 9; } /** * @brief Encode statistics in ORC protobuf format * * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers * * ORC statistics format from https://orc.apache.org/specification/ORCv1/ * * message ColumnStatistics { * // the number of values * optional uint64 numberOfValues = 1; * // At most one of these has a value for any column * optional IntegerStatistics intStatistics = 2; * optional DoubleStatistics doubleStatistics = 3; * optional StringStatistics stringStatistics = 4; * optional BucketStatistics bucketStatistics = 5; * optional DecimalStatistics decimalStatistics = 6; * optional DateStatistics dateStatistics = 7; * optional BinaryStatistics binaryStatistics = 8; * optional TimestampStatistics timestampStatistics = 9; * optional bool hasNull = 10; * } */ constexpr unsigned int encode_threads_per_chunk = 32; constexpr unsigned int encode_chunks_per_block = 4; constexpr unsigned int encode_threads_per_block = encode_threads_per_chunk * encode_chunks_per_block; __global__ void __launch_bounds__(encode_threads_per_block) gpu_encode_statistics(uint8_t* blob_bfr, statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count) { __shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block]; uint32_t t = threadIdx.x; uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y; stats_state_s* const s = &state_g[threadIdx.y]; // Encode and update actual bfr size if (idx < statistics_count && t == 0) { s->chunk = chunks[idx]; s->group = groups[idx]; s->stats_dtype = s->group.stats_dtype; s->base = blob_bfr + s->group.start_chunk; s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; uint8_t* cur = 
pb_put_uint(s->base, 1, s->chunk.non_nulls); uint8_t* fld_start = cur; switch (s->stats_dtype) { case dtype_int8: case dtype_int16: case dtype_int32: case dtype_int64: // intStatistics = 2 // message IntegerStatistics { // optional sint64 minimum = 1; // optional sint64 maximum = 2; // optional sint64 sum = 3; // } if (s->chunk.has_minmax || s->chunk.has_sum) { *cur = 2 * 8 + ProtofType::FIXEDLEN; cur += 2; if (s->chunk.has_minmax) { cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); } if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } fld_start[1] = cur - (fld_start + 2); } break; case dtype_float32: case dtype_float64: // doubleStatistics = 3 // message DoubleStatistics { // optional double minimum = 1; // optional double maximum = 2; // optional double sum = 3; // } if (s->chunk.has_minmax) { *cur = 3 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_string: // stringStatistics = 4 // message StringStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional sint64 sum = 3; // sum will store the total length of all strings // } if (s->chunk.has_minmax && s->chunk.has_sum) { uint32_t sz = (pb_put_int(cur, 3, s->chunk.sum.i_val) - cur) + (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; cur[0] = 4 * 8 + ProtofType::FIXEDLEN; cur = pb_encode_uint(cur + 1, sz); cur = pb_put_binary( cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); cur = pb_put_binary( cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } break; case dtype_bool: // bucketStatistics = 5 // message 
BucketStatistics { // repeated uint64 count = 1 [packed=true]; // } if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values cur[0] = 5 * 8 + ProtofType::FIXEDLEN; cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_decimal64: case dtype_decimal128: // decimalStatistics = 6 // message DecimalStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional string sum = 3; // } if (s->chunk.has_minmax) { // TODO: Decimal support (decimal min/max stored as strings) } break; case dtype_date32: // dateStatistics = 7 // message DateStatistics { // min,max values saved as days since epoch // optional sint32 minimum = 1; // optional sint32 maximum = 2; // } if (s->chunk.has_minmax) { cur[0] = 7 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_timestamp64: // timestampStatistics = 9 // message TimestampStatistics { // optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch // optional sint64 maximum = 2; // optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch // optional sint64 maximumUtc = 4; // } if (s->chunk.has_minmax) { cur[0] = 9 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc fld_start[1] = cur - (fld_start + 2); } break; default: break; } groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base); } } void orc_init_statistics_groups(statistics_group* groups, const stats_column_desc* cols, device_2dspan<rowgroup_rows const> rowgroup_bounds, rmm::cuda_stream_view stream) { dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block, rowgroup_bounds.size().second); dim3 
dim_block(init_threads_per_group, init_groups_per_block); hipLaunchKernelGGL(( gpu_init_statistics_groups), dim3(dim_grid), dim3(dim_block), 0, stream.value(), groups, cols, rowgroup_bounds); } /** * @brief Launches kernels to return statistics buffer offsets and sizes * * @param[in,out] groups Statistics merge groups * @param[in] chunks Statistics chunks * @param[in] statistics_count Number of statistics buffers to encode * @param[in] stream CUDA stream used for device memory operations and kernel launches */ void orc_init_statistics_buffersize(statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { hipLaunchKernelGGL(( gpu_init_statistics_buffersize<block_size>) , dim3(1), dim3(block_size), 0, stream.value(), groups, chunks, statistics_count); } /** * @brief Launches kernel to encode statistics in ORC protobuf format * * @param[out] blob_bfr Output buffer for statistics blobs * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers */ void orc_encode_statistics(uint8_t* blob_bfr, statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { unsigned int num_blocks = (statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block; dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block); hipLaunchKernelGGL(( gpu_encode_statistics), dim3(num_blocks), dim3(dim_block), 0, stream.value(), blob_bfr, groups, chunks, statistics_count); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
f5f8d8921f6e3e3117447a93ee446b3752002aee.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { constexpr unsigned int init_threads_per_group = 32; constexpr unsigned int init_groups_per_block = 4; constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block; __global__ void __launch_bounds__(init_threads_per_block) gpu_init_statistics_groups(statistics_group* groups, const stats_column_desc* cols, device_2dspan<rowgroup_rows const> rowgroup_bounds) { __shared__ __align__(4) statistics_group group_g[init_groups_per_block]; uint32_t const col_id = blockIdx.y; uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y; uint32_t const t = threadIdx.x; auto const num_rowgroups = rowgroup_bounds.size().first; statistics_group* group = &group_g[threadIdx.y]; if (chunk_id < num_rowgroups and t == 0) { group->col = &cols[col_id]; group->start_row = rowgroup_bounds[chunk_id][col_id].begin; group->num_rows = rowgroup_bounds[chunk_id][col_id].size(); groups[col_id * num_rowgroups + chunk_id] = *group; } } /** * @brief Get the buffer size and offsets of encoded statistics * * @param[in,out] groups Statistics merge groups * @param[in] statistics_count Number of statistics buffers */ constexpr unsigned int buffersize_reduction_dim = 32; constexpr 
unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim; constexpr unsigned int pb_fld_hdrlen = 1; constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length constexpr unsigned int pb_fldlen_int64 = 10; constexpr unsigned int pb_fldlen_float64 = 8; constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64; constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64; template <unsigned int block_size> __global__ void __launch_bounds__(block_size, 1) gpu_init_statistics_buffersize(statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count) { using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ typename block_scan::TempStorage temp_storage; volatile uint32_t stats_size = 0; uint32_t t = threadIdx.x; __syncthreads(); for (uint32_t start = 0; start < statistics_count; start += block_size) { uint32_t stats_len = 0, stats_pos; uint32_t idx = start + t; if (idx < statistics_count) { statistics_dtype const dtype = groups[idx].stats_dtype; switch (dtype) { case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break; case dtype_int8: case dtype_int16: case dtype_int32: case dtype_date32: case dtype_int64: case dtype_timestamp64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64); break; case dtype_float32: case dtype_float64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64); break; case dtype_decimal64: case dtype_decimal128: stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal); break; case dtype_string: stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) + chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length; 
break; case dtype_none: stats_len = pb_fldlen_common; default: break; } } uint32_t tmp_stats_size; block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size); stats_pos += stats_size; stats_size += tmp_stats_size; if (idx < statistics_count) { groups[idx].start_chunk = stats_pos; groups[idx].num_chunks = stats_len; } __syncthreads(); } } struct stats_state_s { uint8_t* base; ///< Output buffer start uint8_t* end; ///< Output buffer end statistics_chunk chunk; statistics_merge_group group; statistics_dtype stats_dtype; //!< Statistics data type for this column // ORC stats uint64_t numberOfValues; uint8_t hasNull; }; /* * Protobuf encoding - see * https://developers.google.com/protocol-buffers/docs/encoding */ // Protobuf varint encoding for unsigned int __device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v) { while (v > 0x7f) { *p++ = ((uint32_t)v | 0x80); v >>= 7; } *p++ = v; return p; } // Protobuf field encoding for unsigned int __device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v) { p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16 return pb_encode_uint(p + 1, v); } // Protobuf field encoding for signed int __device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v) { int64_t s = (v < 0); return pb_put_uint(p, id, (v ^ -s) * 2 + s); } // Protobuf field encoding for 'packed' unsigned int (single value) __device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v) { uint8_t* p2 = pb_encode_uint(p + 2, v); p[0] = id * 8 + ProtofType::FIXEDLEN; p[1] = static_cast<uint8_t>(p2 - (p + 2)); return p2; } // Protobuf field encoding for binary/string __device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len) { p[0] = id * 8 + ProtofType::FIXEDLEN; p = pb_encode_uint(p + 1, len); memcpy(p, bytes, len); return p + len; } // Protobuf field encoding for 64-bit raw encoding (double) __device__ inline uint8_t* 
pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64) { p[0] = id * 8 + ProtofType::FIXED64; memcpy(p + 1, raw64, 8); return p + 9; } /** * @brief Encode statistics in ORC protobuf format * * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers * * ORC statistics format from https://orc.apache.org/specification/ORCv1/ * * message ColumnStatistics { * // the number of values * optional uint64 numberOfValues = 1; * // At most one of these has a value for any column * optional IntegerStatistics intStatistics = 2; * optional DoubleStatistics doubleStatistics = 3; * optional StringStatistics stringStatistics = 4; * optional BucketStatistics bucketStatistics = 5; * optional DecimalStatistics decimalStatistics = 6; * optional DateStatistics dateStatistics = 7; * optional BinaryStatistics binaryStatistics = 8; * optional TimestampStatistics timestampStatistics = 9; * optional bool hasNull = 10; * } */ constexpr unsigned int encode_threads_per_chunk = 32; constexpr unsigned int encode_chunks_per_block = 4; constexpr unsigned int encode_threads_per_block = encode_threads_per_chunk * encode_chunks_per_block; __global__ void __launch_bounds__(encode_threads_per_block) gpu_encode_statistics(uint8_t* blob_bfr, statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count) { __shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block]; uint32_t t = threadIdx.x; uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y; stats_state_s* const s = &state_g[threadIdx.y]; // Encode and update actual bfr size if (idx < statistics_count && t == 0) { s->chunk = chunks[idx]; s->group = groups[idx]; s->stats_dtype = s->group.stats_dtype; s->base = blob_bfr + s->group.start_chunk; s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls); uint8_t* fld_start = cur; switch (s->stats_dtype) 
{ case dtype_int8: case dtype_int16: case dtype_int32: case dtype_int64: // intStatistics = 2 // message IntegerStatistics { // optional sint64 minimum = 1; // optional sint64 maximum = 2; // optional sint64 sum = 3; // } if (s->chunk.has_minmax || s->chunk.has_sum) { *cur = 2 * 8 + ProtofType::FIXEDLEN; cur += 2; if (s->chunk.has_minmax) { cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); } if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } fld_start[1] = cur - (fld_start + 2); } break; case dtype_float32: case dtype_float64: // doubleStatistics = 3 // message DoubleStatistics { // optional double minimum = 1; // optional double maximum = 2; // optional double sum = 3; // } if (s->chunk.has_minmax) { *cur = 3 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_string: // stringStatistics = 4 // message StringStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional sint64 sum = 3; // sum will store the total length of all strings // } if (s->chunk.has_minmax && s->chunk.has_sum) { uint32_t sz = (pb_put_int(cur, 3, s->chunk.sum.i_val) - cur) + (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; cur[0] = 4 * 8 + ProtofType::FIXEDLEN; cur = pb_encode_uint(cur + 1, sz); cur = pb_put_binary( cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); cur = pb_put_binary( cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } break; case dtype_bool: // bucketStatistics = 5 // message BucketStatistics { // repeated uint64 count = 1 [packed=true]; // } if (s->chunk.has_sum) { // 
Sum is equal to the number of 'true' values cur[0] = 5 * 8 + ProtofType::FIXEDLEN; cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_decimal64: case dtype_decimal128: // decimalStatistics = 6 // message DecimalStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional string sum = 3; // } if (s->chunk.has_minmax) { // TODO: Decimal support (decimal min/max stored as strings) } break; case dtype_date32: // dateStatistics = 7 // message DateStatistics { // min,max values saved as days since epoch // optional sint32 minimum = 1; // optional sint32 maximum = 2; // } if (s->chunk.has_minmax) { cur[0] = 7 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_timestamp64: // timestampStatistics = 9 // message TimestampStatistics { // optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch // optional sint64 maximum = 2; // optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch // optional sint64 maximumUtc = 4; // } if (s->chunk.has_minmax) { cur[0] = 9 * 8 + ProtofType::FIXEDLEN; cur += 2; cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc fld_start[1] = cur - (fld_start + 2); } break; default: break; } groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base); } } void orc_init_statistics_groups(statistics_group* groups, const stats_column_desc* cols, device_2dspan<rowgroup_rows const> rowgroup_bounds, rmm::cuda_stream_view stream) { dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block, rowgroup_bounds.size().second); dim3 dim_block(init_threads_per_group, init_groups_per_block); gpu_init_statistics_groups<<<dim_grid, dim_block, 0, 
stream.value()>>>( groups, cols, rowgroup_bounds); } /** * @brief Launches kernels to return statistics buffer offsets and sizes * * @param[in,out] groups Statistics merge groups * @param[in] chunks Statistics chunks * @param[in] statistics_count Number of statistics buffers to encode * @param[in] stream CUDA stream used for device memory operations and kernel launches */ void orc_init_statistics_buffersize(statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { gpu_init_statistics_buffersize<block_size> <<<1, block_size, 0, stream.value()>>>(groups, chunks, statistics_count); } /** * @brief Launches kernel to encode statistics in ORC protobuf format * * @param[out] blob_bfr Output buffer for statistics blobs * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers */ void orc_encode_statistics(uint8_t* blob_bfr, statistics_merge_group* groups, const statistics_chunk* chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { unsigned int num_blocks = (statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block; dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block); gpu_encode_statistics<<<num_blocks, dim_block, 0, stream.value()>>>( blob_bfr, groups, chunks, statistics_count); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
57ae43b426ca214cbd35699867d55f256e799c8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_gamma (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(tgamma)(x[offset_x + gid * stride_x]); } }
57ae43b426ca214cbd35699867d55f256e799c8d.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_gamma (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(tgamma)(x[offset_x + gid * stride_x]); } }
907af455629ecf37a9dc76533b8a3ed8dc059e5d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> #include <rocblas.h> #include "dense_help_func.hpp" #include "dense.cu" int main(int argc, char** argv) { if (argc != 4) { printf("usage: ./main [M] [K] [N]\n"); exit(0); } size_t M = atoi(argv[1]); size_t K = atoi(argv[2]); size_t N = atoi(argv[3]); size_t bytes = sizeof(float) * M * K; float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_C = (float*)malloc(bytes); float* h_C1 = (float*)malloc(bytes); float* d_A; float* d_B; float* d_C; checkCudaErrors(hipMalloc(&d_A, bytes)); checkCudaErrors(hipMalloc(&d_B, bytes)); checkCudaErrors(hipMalloc(&d_C, bytes)); double msecPerMatrixMul[2] = {0, 0}; double gigaFlops[2] = {0, 0}; double flopsPerMatrixMul = 2.0 * M * N * K; const int BLOCK_SIZE_M = 32; const int BLOCK_SIZE_K = 32; const int BLOCK_SIZE_N = 32; const int THREAD_SIZE_X = 4; const int THREAD_SIZE_Y = 4; const bool ENABLE_DOUBLE_BUFFER = false; int k_block = K / BLOCK_SIZE_K; int stride = 2; // A for( int i = 0; i < M * K; i++ ) { int row = (i / K); int col = (i % K); int row_block = row / BLOCK_SIZE_M; int col_block = col / BLOCK_SIZE_K; if ((row_block * k_block + col_block) % stride == 0) h_A[i] = 1; else { h_A[i] = 0; } } // B for( int i = 0; i < K * N; i++ ) { if ( i >= K * N / 2) h_B[i] = 2; else { h_B[i] = 0; } } checkCudaErrors(hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice)); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); float msecTotal = 0; int nIter = 100; checkCudaErrors(hipMemcpy( d_C, h_C, bytes, hipMemcpyHostToDevice)); checkCudaErrors(hipEventRecord(start)); for (int run = 0 ; run < nIter; run ++ ) { dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y); dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M); hipLaunchKernelGGL(( 
MatrixMulCUDA6<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, THREAD_SIZE_Y, THREAD_SIZE_X, ENABLE_DOUBLE_BUFFER>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_A, d_B, d_C, K, N); } checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); checkCudaErrors(hipMemcpy( h_C, d_C, bytes, hipMemcpyDeviceToHost)); msecPerMatrixMul[0] = msecTotal / nIter; gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f); printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n", gigaFlops[0], msecPerMatrixMul[0], flopsPerMatrixMul); // cublas hipblasHandle_t blas_handle; checkCuBlasErrors ( hipblasCreate(&blas_handle) ); float alpha = 1.0; float beta = 0; checkCudaErrors(hipMemcpy( d_C, h_C, bytes, hipMemcpyHostToDevice)); checkCudaErrors(hipEventRecord(start)); for (int run = 0 ; run < nIter; run ++ ) { checkCuBlasErrors ( hipblasSgemm (blas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, M, N, K, &alpha, d_A, M, d_B, K, &beta, d_C, K ) ); } checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); checkCudaErrors(hipMemcpy( h_C1, d_C, bytes, hipMemcpyDeviceToHost)); msecPerMatrixMul[1] = msecTotal / nIter; gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f); printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n", gigaFlops[1], msecPerMatrixMul[1], flopsPerMatrixMul); hipblasDestroy(blas_handle); double eps = 1.e-6; // machine zero bool correct = true; for (int i = 0; i < M * N; i++) { // h_C1 int row = i / N; int col = i % N; double abs_err = fabs(h_C[i] - h_C1[col * M + row]); double dot_length = M; double abs_val = fabs(h_C[i]); double rel_err = abs_err / abs_val / dot_length; if (rel_err > eps) { printf("Error! 
Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_C1[col * M + row], eps); correct = false; break; } } printf("%s\n", correct ? "Result= PASS" : "Result= FAIL"); printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]); // Free Memory hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); free(h_C1); }
907af455629ecf37a9dc76533b8a3ed8dc059e5d.cu
#include <stdio.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> #include <cublas_v2.h> #include "dense_help_func.hpp" #include "dense.cu" int main(int argc, char** argv) { if (argc != 4) { printf("usage: ./main [M] [K] [N]\n"); exit(0); } size_t M = atoi(argv[1]); size_t K = atoi(argv[2]); size_t N = atoi(argv[3]); size_t bytes = sizeof(float) * M * K; float* h_A = (float*)malloc(bytes); float* h_B = (float*)malloc(bytes); float* h_C = (float*)malloc(bytes); float* h_C1 = (float*)malloc(bytes); float* d_A; float* d_B; float* d_C; checkCudaErrors(cudaMalloc(&d_A, bytes)); checkCudaErrors(cudaMalloc(&d_B, bytes)); checkCudaErrors(cudaMalloc(&d_C, bytes)); double msecPerMatrixMul[2] = {0, 0}; double gigaFlops[2] = {0, 0}; double flopsPerMatrixMul = 2.0 * M * N * K; const int BLOCK_SIZE_M = 32; const int BLOCK_SIZE_K = 32; const int BLOCK_SIZE_N = 32; const int THREAD_SIZE_X = 4; const int THREAD_SIZE_Y = 4; const bool ENABLE_DOUBLE_BUFFER = false; int k_block = K / BLOCK_SIZE_K; int stride = 2; // 生成A的数据 for( int i = 0; i < M * K; i++ ) { int row = (i / K); int col = (i % K); int row_block = row / BLOCK_SIZE_M; int col_block = col / BLOCK_SIZE_K; if ((row_block * k_block + col_block) % stride == 0) h_A[i] = 1; else { h_A[i] = 0; } } // 生成B的数据 for( int i = 0; i < K * N; i++ ) { if ( i >= K * N / 2) h_B[i] = 2; else { h_B[i] = 0; } } checkCudaErrors(cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice)); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); float msecTotal = 0; int nIter = 100; checkCudaErrors(cudaMemcpy( d_C, h_C, bytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaEventRecord(start)); for (int run = 0 ; run < nIter; run ++ ) { dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y); dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M); MatrixMulCUDA6<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, 
THREAD_SIZE_Y, THREAD_SIZE_X, ENABLE_DOUBLE_BUFFER> <<< dimGrid, dimBlock >>>(d_A, d_B, d_C, K, N); } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); checkCudaErrors(cudaMemcpy( h_C, d_C, bytes, cudaMemcpyDeviceToHost)); msecPerMatrixMul[0] = msecTotal / nIter; gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f); printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n", gigaFlops[0], msecPerMatrixMul[0], flopsPerMatrixMul); // cublas cublasHandle_t blas_handle; checkCuBlasErrors ( cublasCreate(&blas_handle) ); float alpha = 1.0; float beta = 0; checkCudaErrors(cudaMemcpy( d_C, h_C, bytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaEventRecord(start)); for (int run = 0 ; run < nIter; run ++ ) { checkCuBlasErrors ( cublasSgemm (blas_handle, CUBLAS_OP_T, CUBLAS_OP_T, M, N, K, &alpha, d_A, M, d_B, K, &beta, d_C, K ) ); } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); checkCudaErrors(cudaMemcpy( h_C1, d_C, bytes, cudaMemcpyDeviceToHost)); msecPerMatrixMul[1] = msecTotal / nIter; gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f); printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n", gigaFlops[1], msecPerMatrixMul[1], flopsPerMatrixMul); cublasDestroy(blas_handle); double eps = 1.e-6; // machine zero bool correct = true; for (int i = 0; i < M * N; i++) { // h_C1 是转置 int row = i / N; int col = i % N; double abs_err = fabs(h_C[i] - h_C1[col * M + row]); double dot_length = M; double abs_val = fabs(h_C[i]); double rel_err = abs_err / abs_val / dot_length; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_C1[col * M + row], eps); correct = false; break; } } printf("%s\n", correct ? 
"Result= PASS" : "Result= FAIL"); printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]); // Free Memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); free(h_C1); }
b3f0cdf4cc008e91b4107adfc3d224d87fe8fa8f.hip
// !!! This is a file automatically generated by hipify!!! #include <cstddef> #include <cstdint> #include <random> #include <vector> #define CATCH_CONFIG_MAIN #include <catch.hpp> #include <hip/hip_runtime.h> #include "HeterogeneousTest/CUDAKernel/interface/DeviceAdditionKernel.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" TEST_CASE("HeterogeneousTest/CUDAKernel test", "[cudaTestKernelAdditionKernel]") { cms::cudatest::requireDevices(); // random number generator with a gaussian distribution std::random_device rd{}; std::default_random_engine rand{rd()}; std::normal_distribution<float> dist{0., 1.}; // tolerance constexpr float epsilon = 0.000001; // buffer size constexpr size_t size = 1024 * 1024; // allocate input and output host buffers std::vector<float> in1_h(size); std::vector<float> in2_h(size); std::vector<float> out_h(size); // fill the input buffers with random data, and the output buffer with zeros for (size_t i = 0; i < size; ++i) { in1_h[i] = dist(rand); in2_h[i] = dist(rand); out_h[i] = 0.; } SECTION("Test add_vectors_f") { // allocate input and output buffers on the device float* in1_d; float* in2_d; float* out_d; REQUIRE_NOTHROW(cudaCheck(hipMalloc(&in1_d, size * sizeof(float)))); REQUIRE_NOTHROW(cudaCheck(hipMalloc(&in2_d, size * sizeof(float)))); REQUIRE_NOTHROW(cudaCheck(hipMalloc(&out_d, size * sizeof(float)))); // copy the input data to the device REQUIRE_NOTHROW(cudaCheck(hipMemcpy(in1_d, in1_h.data(), size * sizeof(float), hipMemcpyHostToDevice))); REQUIRE_NOTHROW(cudaCheck(hipMemcpy(in2_d, in2_h.data(), size * sizeof(float), hipMemcpyHostToDevice))); // fill the output buffer with zeros REQUIRE_NOTHROW(cudaCheck(hipMemset(out_d, 0, size * sizeof(float)))); // launch the 1-dimensional kernel for vector addition hipLaunchKernelGGL(( cms::cudatest::kernel_add_vectors_f), dim3(32), dim3(32), 0, 0, in1_d, in2_d, out_d, size); 
REQUIRE_NOTHROW(cudaCheck(hipGetLastError())); // copy the results from the device to the host REQUIRE_NOTHROW(cudaCheck(hipMemcpy(out_h.data(), out_d, size * sizeof(float), hipMemcpyDeviceToHost))); // wait for all the operations to complete REQUIRE_NOTHROW(cudaCheck(hipDeviceSynchronize())); // check the results for (size_t i = 0; i < size; ++i) { float sum = in1_h[i] + in2_h[i]; CHECK_THAT(out_h[i], Catch::Matchers::WithinAbs(sum, epsilon)); } } }
b3f0cdf4cc008e91b4107adfc3d224d87fe8fa8f.cu
#include <cstddef> #include <cstdint> #include <random> #include <vector> #define CATCH_CONFIG_MAIN #include <catch.hpp> #include <cuda_runtime.h> #include "HeterogeneousTest/CUDAKernel/interface/DeviceAdditionKernel.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" TEST_CASE("HeterogeneousTest/CUDAKernel test", "[cudaTestKernelAdditionKernel]") { cms::cudatest::requireDevices(); // random number generator with a gaussian distribution std::random_device rd{}; std::default_random_engine rand{rd()}; std::normal_distribution<float> dist{0., 1.}; // tolerance constexpr float epsilon = 0.000001; // buffer size constexpr size_t size = 1024 * 1024; // allocate input and output host buffers std::vector<float> in1_h(size); std::vector<float> in2_h(size); std::vector<float> out_h(size); // fill the input buffers with random data, and the output buffer with zeros for (size_t i = 0; i < size; ++i) { in1_h[i] = dist(rand); in2_h[i] = dist(rand); out_h[i] = 0.; } SECTION("Test add_vectors_f") { // allocate input and output buffers on the device float* in1_d; float* in2_d; float* out_d; REQUIRE_NOTHROW(cudaCheck(cudaMalloc(&in1_d, size * sizeof(float)))); REQUIRE_NOTHROW(cudaCheck(cudaMalloc(&in2_d, size * sizeof(float)))); REQUIRE_NOTHROW(cudaCheck(cudaMalloc(&out_d, size * sizeof(float)))); // copy the input data to the device REQUIRE_NOTHROW(cudaCheck(cudaMemcpy(in1_d, in1_h.data(), size * sizeof(float), cudaMemcpyHostToDevice))); REQUIRE_NOTHROW(cudaCheck(cudaMemcpy(in2_d, in2_h.data(), size * sizeof(float), cudaMemcpyHostToDevice))); // fill the output buffer with zeros REQUIRE_NOTHROW(cudaCheck(cudaMemset(out_d, 0, size * sizeof(float)))); // launch the 1-dimensional kernel for vector addition cms::cudatest::kernel_add_vectors_f<<<32, 32>>>(in1_d, in2_d, out_d, size); REQUIRE_NOTHROW(cudaCheck(cudaGetLastError())); // copy the results from the device to the host 
REQUIRE_NOTHROW(cudaCheck(cudaMemcpy(out_h.data(), out_d, size * sizeof(float), cudaMemcpyDeviceToHost))); // wait for all the operations to complete REQUIRE_NOTHROW(cudaCheck(cudaDeviceSynchronize())); // check the results for (size_t i = 0; i < size; ++i) { float sum = in1_h[i] + in2_h[i]; CHECK_THAT(out_h[i], Catch::Matchers::WithinAbs(sum, epsilon)); } } }
cb5d1a4fdb710b174a5f020bc5e3f186b511646a.hip
// !!! This is a file automatically generated by hipify!!! #include <R.h> #include <Rdefines.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include "cudaLogReg.h" #include "model_logreg_cuda.h" #include "entrypoints-model_logreg_cuda.h" SEXP get_condprob_logreg_cuda(SEXP X, SEXP W, SEXP normalize, SEXP log_domain) { // initializing protection counter int nprot = 0; // Getting pointers to underlying variables int * normalize_ptr = LOGICAL(normalize); int * log_domain_ptr = LOGICAL(log_domain); // Getting pointers to underlying C arrays. // Host pointers double * X_ptr = (double *) REAL(X); // N X M double * W_ptr = (double *) REAL(W); // K X M // getting arrays underlying dimension int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); // Getting dimensions int N = dim_X[0], M = dim_X[1], K = dim_W[0]; // assigining for class conditional probabilities Y. is K X N SEXP Y; PROTECT(Y = allocMatrix(REALSXP, K, N)); nprot++; memset(REAL(Y), 0.0, K * N * sizeof(double)); // Initializing Y double * Y_ptr = (double *)REAL(Y); // Getting pointers to Y // device pointers double * dev_Y_ptr; double * dev_W_ptr; double * dev_X_ptr; // Initializing cublas handle hipblasHandle_t handle; // allocating memory on the device CUDA_CALL(hipMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_Y_ptr, K * N * sizeof(double))); // Creating handle CUBLAS_CALL(hipblasCreate(&handle)); // Copying matrices to device CUBLAS_CALL(hipblasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(hipblasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); // Setting condtional probabilities _set_condprob_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_Y_ptr, M, N, K, *normalize_ptr, *log_domain_ptr); // Copying to host CUBLAS_CALL(hipblasGetMatrix(K, N, sizeof(double), dev_Y_ptr, K, Y_ptr, K)); hipFree(dev_X_ptr); hipFree(dev_W_ptr); hipFree(dev_Y_ptr); 
hipblasDestroy(handle); UNPROTECT(nprot); return Y; } // Cross entropy (minus log-likelihood) SEXP get_cost_logreg_cuda(SEXP X, SEXP W, SEXP T, SEXP decay) { // initializing protection counter int nprot = 0; // Assigning SEXP Variable for cross-entropy SEXP cost; PROTECT(cost = allocVector(REALSXP, 1)); nprot++; // Initializing cross entropy memset(REAL(cost), 0.0, sizeof(double)); // Getting pointers to underlying arrays double * X_ptr = REAL(X); // N X K double * dev_X_ptr; double * W_ptr = REAL(W); // K X M double * dev_W_ptr; double * T_ptr = REAL(T); // K X N double * dev_T_ptr; double * dev_log_Y_ptr; double * cost_ptr = REAL(cost); double * decay_ptr = REAL(decay); // getting arrays underlying dimension int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); // Getting dimensions int N = dim_X[0], M = dim_X[1], K = dim_W[0]; // Allocating and transfering to device // Initializing cublas handle hipblasHandle_t handle; // Creating handle CUBLAS_CALL(hipblasCreate(&handle)); // allocating memory on the device CUDA_CALL(hipMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_T_ptr, K * N * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_log_Y_ptr, K * N * sizeof(double))); // Copying matrices to device CUBLAS_CALL(hipblasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(hipblasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); CUBLAS_CALL(hipblasSetMatrix(K, N, sizeof(double), T_ptr, K, dev_T_ptr, K)); // Computing cost on the device but returning on hist _set_cost_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_T_ptr, dev_log_Y_ptr, M, N, K, decay_ptr, cost_ptr); // Cleaning up hipFree(dev_X_ptr); hipFree(dev_W_ptr); hipFree(dev_T_ptr); hipFree(dev_log_Y_ptr); hipblasDestroy(handle); UNPROTECT(nprot); return cost; } SEXP get_grad_logreg_cuda(SEXP X, SEXP W, SEXP T, SEXP decay) { // Initializing protection counte 
int nprot = 0; // Getting pointers to underlying C arrays. double * X_ptr = REAL(X); // N X M double * W_ptr = REAL(W); // K X M double * T_ptr = REAL(T); // K X N double * decay_ptr = REAL(decay); // getting arrays underlying dimension */ int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); int M = dim_X[1], N = dim_X[0], K = dim_W[0]; double * dev_W_ptr; double * dev_X_ptr; double * dev_T_ptr; double * dev_Y_ptr; double * dev_Y_minus_T_ptr; // Defining and allocating return SEXP SEXP grad; PROTECT(grad = allocMatrix(REALSXP, K, M)); nprot++; double * grad_ptr = REAL(grad); double * dev_grad_ptr; memset(grad_ptr, 0.0, sizeof(double) * K * M); // Allocating and transfering to device // Initializing cublas handle hipblasHandle_t handle; // Creating handle CUBLAS_CALL(hipblasCreate(&handle)); // allocating memory on the device CUDA_CALL(hipMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_T_ptr, K * N * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_Y_ptr, K * N * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_Y_minus_T_ptr, K * N * sizeof(double))); CUDA_CALL(hipMalloc((void **) &dev_grad_ptr, K * M * sizeof(double))); // Copying matrices to device CUBLAS_CALL(hipblasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(hipblasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); CUBLAS_CALL(hipblasSetMatrix(K, N, sizeof(double), T_ptr, K, dev_T_ptr, K)); // Computing gradient _set_grad_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_T_ptr, dev_Y_ptr, dev_Y_minus_T_ptr, M, N, K, decay_ptr, dev_grad_ptr); // Transferring data to host // Copying to host CUBLAS_CALL(hipblasGetMatrix(K, M, sizeof(double), dev_grad_ptr, K, grad_ptr, K)); // freeing allocated memory hipFree(dev_X_ptr); hipFree(dev_W_ptr); hipFree(dev_T_ptr); hipFree(dev_Y_ptr); hipFree(dev_Y_minus_T_ptr); hipFree(dev_grad_ptr); hipblasDestroy(handle); 
UNPROTECT(nprot); return grad; }
cb5d1a4fdb710b174a5f020bc5e3f186b511646a.cu
#include <R.h> #include <Rdefines.h> #include <cuda.h> #include <cublas_v2.h> #include "cudaLogReg.h" #include "model_logreg_cuda.h" #include "entrypoints-model_logreg_cuda.h" SEXP get_condprob_logreg_cuda(SEXP X, SEXP W, SEXP normalize, SEXP log_domain) { // initializing protection counter int nprot = 0; // Getting pointers to underlying variables int * normalize_ptr = LOGICAL(normalize); int * log_domain_ptr = LOGICAL(log_domain); // Getting pointers to underlying C arrays. // Host pointers double * X_ptr = (double *) REAL(X); // N X M double * W_ptr = (double *) REAL(W); // K X M // getting arrays underlying dimension int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); // Getting dimensions int N = dim_X[0], M = dim_X[1], K = dim_W[0]; // assigining for class conditional probabilities Y. is K X N SEXP Y; PROTECT(Y = allocMatrix(REALSXP, K, N)); nprot++; memset(REAL(Y), 0.0, K * N * sizeof(double)); // Initializing Y double * Y_ptr = (double *)REAL(Y); // Getting pointers to Y // device pointers double * dev_Y_ptr; double * dev_W_ptr; double * dev_X_ptr; // Initializing cublas handle cublasHandle_t handle; // allocating memory on the device CUDA_CALL(cudaMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_Y_ptr, K * N * sizeof(double))); // Creating handle CUBLAS_CALL(cublasCreate(&handle)); // Copying matrices to device CUBLAS_CALL(cublasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(cublasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); // Setting condtional probabilities _set_condprob_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_Y_ptr, M, N, K, *normalize_ptr, *log_domain_ptr); // Copying to host CUBLAS_CALL(cublasGetMatrix(K, N, sizeof(double), dev_Y_ptr, K, Y_ptr, K)); cudaFree(dev_X_ptr); cudaFree(dev_W_ptr); cudaFree(dev_Y_ptr); cublasDestroy(handle); UNPROTECT(nprot); return Y; } // Cross entropy 
(minus log-likelihood) SEXP get_cost_logreg_cuda(SEXP X, SEXP W, SEXP T, SEXP decay) { // initializing protection counter int nprot = 0; // Assigning SEXP Variable for cross-entropy SEXP cost; PROTECT(cost = allocVector(REALSXP, 1)); nprot++; // Initializing cross entropy memset(REAL(cost), 0.0, sizeof(double)); // Getting pointers to underlying arrays double * X_ptr = REAL(X); // N X K double * dev_X_ptr; double * W_ptr = REAL(W); // K X M double * dev_W_ptr; double * T_ptr = REAL(T); // K X N double * dev_T_ptr; double * dev_log_Y_ptr; double * cost_ptr = REAL(cost); double * decay_ptr = REAL(decay); // getting arrays underlying dimension int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); // Getting dimensions int N = dim_X[0], M = dim_X[1], K = dim_W[0]; // Allocating and transfering to device // Initializing cublas handle cublasHandle_t handle; // Creating handle CUBLAS_CALL(cublasCreate(&handle)); // allocating memory on the device CUDA_CALL(cudaMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_T_ptr, K * N * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_log_Y_ptr, K * N * sizeof(double))); // Copying matrices to device CUBLAS_CALL(cublasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(cublasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); CUBLAS_CALL(cublasSetMatrix(K, N, sizeof(double), T_ptr, K, dev_T_ptr, K)); // Computing cost on the device but returning on hist _set_cost_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_T_ptr, dev_log_Y_ptr, M, N, K, decay_ptr, cost_ptr); // Cleaning up cudaFree(dev_X_ptr); cudaFree(dev_W_ptr); cudaFree(dev_T_ptr); cudaFree(dev_log_Y_ptr); cublasDestroy(handle); UNPROTECT(nprot); return cost; } SEXP get_grad_logreg_cuda(SEXP X, SEXP W, SEXP T, SEXP decay) { // Initializing protection counte int nprot = 0; // Getting pointers to underlying C arrays. 
double * X_ptr = REAL(X); // N X M double * W_ptr = REAL(W); // K X M double * T_ptr = REAL(T); // K X N double * decay_ptr = REAL(decay); // getting arrays underlying dimension */ int * dim_X = INTEGER(GET_DIM(X)); int * dim_W = INTEGER(GET_DIM(W)); int M = dim_X[1], N = dim_X[0], K = dim_W[0]; double * dev_W_ptr; double * dev_X_ptr; double * dev_T_ptr; double * dev_Y_ptr; double * dev_Y_minus_T_ptr; // Defining and allocating return SEXP SEXP grad; PROTECT(grad = allocMatrix(REALSXP, K, M)); nprot++; double * grad_ptr = REAL(grad); double * dev_grad_ptr; memset(grad_ptr, 0.0, sizeof(double) * K * M); // Allocating and transfering to device // Initializing cublas handle cublasHandle_t handle; // Creating handle CUBLAS_CALL(cublasCreate(&handle)); // allocating memory on the device CUDA_CALL(cudaMalloc((void **) &dev_X_ptr, N * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_W_ptr, K * M * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_T_ptr, K * N * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_Y_ptr, K * N * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_Y_minus_T_ptr, K * N * sizeof(double))); CUDA_CALL(cudaMalloc((void **) &dev_grad_ptr, K * M * sizeof(double))); // Copying matrices to device CUBLAS_CALL(cublasSetMatrix(N, M, sizeof(double), X_ptr, N, dev_X_ptr, N)); CUBLAS_CALL(cublasSetMatrix(K, M, sizeof(double), W_ptr, K, dev_W_ptr, K)); CUBLAS_CALL(cublasSetMatrix(K, N, sizeof(double), T_ptr, K, dev_T_ptr, K)); // Computing gradient _set_grad_logreg_cuda(handle, dev_X_ptr, dev_W_ptr, dev_T_ptr, dev_Y_ptr, dev_Y_minus_T_ptr, M, N, K, decay_ptr, dev_grad_ptr); // Transferring data to host // Copying to host CUBLAS_CALL(cublasGetMatrix(K, M, sizeof(double), dev_grad_ptr, K, grad_ptr, K)); // freeing allocated memory cudaFree(dev_X_ptr); cudaFree(dev_W_ptr); cudaFree(dev_T_ptr); cudaFree(dev_Y_ptr); cudaFree(dev_Y_minus_T_ptr); cudaFree(dev_grad_ptr); cublasDestroy(handle); UNPROTECT(nprot); return grad; }
7ae2098ef62cf0d6fb477abe53b8beb764069751.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _UPSAMPLE_CUDA_KERNEL #define _UPSAMPLE_CUDA_KERNEL #include "cta_config.h" __global__ void upSample(float* input, float* output, int num_input_rows, int num_input_cols) { int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = gridDim.x * blockDim.x; int numElements = num_input_rows * num_input_cols; __shared__ float tmp_buffer[NUM_THREADS]; for (int i = index; i < numElements; i += numThreads) { // Load data into shared memory tmp_buffer[tid] = input[i]; __syncthreads(); for (int kx = 0; kx < 2; ++kx) { for (int ky = 0; ky < 2; ++ky) { int local_index = (ky * blockDim.x + tid) / 2; float curr_val = tmp_buffer[local_index]; output[(kx * 2 + ky) * numElements + i] = curr_val; } } __syncthreads(); } return; } #endif
7ae2098ef62cf0d6fb477abe53b8beb764069751.cu
#ifndef _UPSAMPLE_CUDA_KERNEL #define _UPSAMPLE_CUDA_KERNEL #include "cta_config.h" __global__ void upSample(float* input, float* output, int num_input_rows, int num_input_cols) { int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = gridDim.x * blockDim.x; int numElements = num_input_rows * num_input_cols; __shared__ float tmp_buffer[NUM_THREADS]; for (int i = index; i < numElements; i += numThreads) { // Load data into shared memory tmp_buffer[tid] = input[i]; __syncthreads(); for (int kx = 0; kx < 2; ++kx) { for (int ky = 0; ky < 2; ++ky) { int local_index = (ky * blockDim.x + tid) / 2; float curr_val = tmp_buffer[local_index]; output[(kx * 2 + ky) * numElements + i] = curr_val; } } __syncthreads(); } return; } #endif
5bd70ac44e3207adb8f867296a36973061486527.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "common.h" #include "utils.h" extern const char* version_name; int parse_args(int* reps, int p_id, int argc, char **argv); int my_abort(int line, int code); #define MY_ABORT(ret) my_abort(__LINE__, ret) #define ABORT_IF_ERROR(ret) CHECK_ERROR(ret, MY_ABORT(ret)) #define ABORT_IF_NULL(ret) CHECK_NULL(ret, MY_ABORT(NO_MEM)) #define INDENT " " #define TIME_DIFF(start, stop) 1.0 * (stop.tv_sec - start.tv_sec) + 1e-6 * (stop.tv_usec - start.tv_usec) int main(int argc, char **argv) { int reps, i, ret; dist_matrix_t mat; double compute_time = 0, pre_time; data_t *x; data_t *y; data_t *cpu_buffer; struct timeval start, stop; double gflops, compute_time_per_run; ret = parse_args(&reps, 0, argc, argv); ABORT_IF_ERROR(ret) ret = read_matrix_default(&mat, argv[2]); ABORT_IF_ERROR(ret) printf("Benchmarking %s on %s.\n", version_name, argv[2]); printf(INDENT"%d x %d, %d non-zeros, %d run(s)\n", \ mat.global_m, mat.global_m, mat.global_nnz, reps); printf(INDENT"Preprocessing.\n"); gettimeofday(&start, NULL); preprocess(&mat); gettimeofday(&stop, NULL); pre_time = TIME_DIFF(start, stop); cpu_buffer = (data_t*) malloc(sizeof(data_t) * mat.global_m); ABORT_IF_NULL(cpu_buffer) ret = read_vector(&mat, argv[2], "_x.vec", 1, cpu_buffer); ABORT_IF_ERROR(ret) ret = hipMalloc(&x, sizeof(data_t) * mat.global_m); ABORT_IF_ERROR(ret) ret = hipMalloc(&y, sizeof(data_t) * mat.global_m); ABORT_IF_ERROR(ret) hipMemcpy(x, cpu_buffer, sizeof(data_t) * mat.global_m, hipMemcpyHostToDevice); /* warm up */ printf(INDENT"Warming up.\n"); hipMemset(y, 0, sizeof(double) * mat.global_m); spmv(&mat, x, y); printf(INDENT"Testing.\n"); for(i = 0; i < reps; ++i) { hipMemset(y, 0, sizeof(double) * mat.global_m); gettimeofday(&start, NULL); spmv(&mat, x, y); hipDeviceSynchronize(); gettimeofday(&stop, NULL); 
compute_time += TIME_DIFF(start, stop); } hipMemcpy(cpu_buffer, y, sizeof(data_t) * mat.global_m, hipMemcpyDeviceToHost); printf(INDENT"Checking.\n"); ret = check_answer(&mat, argv[2], cpu_buffer); if(ret == 0) { printf("\e[1;32m"INDENT"Result validated.\e[0m\n"); } else { fprintf(stderr, "\e[1;31m"INDENT"Result NOT validated.\e[0m\n"); MY_ABORT(ret); } destroy_dist_matrix(&mat); free(cpu_buffer); hipFree(y); hipFree(x); gflops = 2e-9 * mat.global_nnz * reps / compute_time; compute_time_per_run = compute_time / reps; printf(INDENT INDENT"preprocess time = %lf s, compute time = %lf s per run\n", \ pre_time, compute_time_per_run); printf("\e[1;34m"INDENT INDENT"Performance: %lf Gflop/s\e[0m\n", gflops); return 0; } void print_help(const char *argv0, int p_id) { if(p_id == 0) { printf("\e[1;31mUSAGE: %s <repetitions> <input-file>\e[0m\n", argv0); } } int parse_args(int* reps, int p_id, int argc, char **argv) { int r; if(argc < 3) { print_help(argv[0], p_id); return 1; } r = atoi(argv[1]); if(r <= 0) { print_help(argv[0], p_id); return 1; } *reps = r; return SUCCESS; } int my_abort(int line, int code) { fprintf(stderr, "\e[1;33merror at line %d, error code = %d\e[0m\n", line, code); return fatal_error(code); } int fatal_error(int code) { exit(code); return code; }
5bd70ac44e3207adb8f867296a36973061486527.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <cuda.h> #include <sys/time.h> #include "common.h" #include "utils.h" extern const char* version_name; int parse_args(int* reps, int p_id, int argc, char **argv); int my_abort(int line, int code); #define MY_ABORT(ret) my_abort(__LINE__, ret) #define ABORT_IF_ERROR(ret) CHECK_ERROR(ret, MY_ABORT(ret)) #define ABORT_IF_NULL(ret) CHECK_NULL(ret, MY_ABORT(NO_MEM)) #define INDENT " " #define TIME_DIFF(start, stop) 1.0 * (stop.tv_sec - start.tv_sec) + 1e-6 * (stop.tv_usec - start.tv_usec) int main(int argc, char **argv) { int reps, i, ret; dist_matrix_t mat; double compute_time = 0, pre_time; data_t *x; data_t *y; data_t *cpu_buffer; struct timeval start, stop; double gflops, compute_time_per_run; ret = parse_args(&reps, 0, argc, argv); ABORT_IF_ERROR(ret) ret = read_matrix_default(&mat, argv[2]); ABORT_IF_ERROR(ret) printf("Benchmarking %s on %s.\n", version_name, argv[2]); printf(INDENT"%d x %d, %d non-zeros, %d run(s)\n", \ mat.global_m, mat.global_m, mat.global_nnz, reps); printf(INDENT"Preprocessing.\n"); gettimeofday(&start, NULL); preprocess(&mat); gettimeofday(&stop, NULL); pre_time = TIME_DIFF(start, stop); cpu_buffer = (data_t*) malloc(sizeof(data_t) * mat.global_m); ABORT_IF_NULL(cpu_buffer) ret = read_vector(&mat, argv[2], "_x.vec", 1, cpu_buffer); ABORT_IF_ERROR(ret) ret = cudaMalloc(&x, sizeof(data_t) * mat.global_m); ABORT_IF_ERROR(ret) ret = cudaMalloc(&y, sizeof(data_t) * mat.global_m); ABORT_IF_ERROR(ret) cudaMemcpy(x, cpu_buffer, sizeof(data_t) * mat.global_m, cudaMemcpyHostToDevice); /* warm up */ printf(INDENT"Warming up.\n"); cudaMemset(y, 0, sizeof(double) * mat.global_m); spmv(&mat, x, y); printf(INDENT"Testing.\n"); for(i = 0; i < reps; ++i) { cudaMemset(y, 0, sizeof(double) * mat.global_m); gettimeofday(&start, NULL); spmv(&mat, x, y); cudaDeviceSynchronize(); gettimeofday(&stop, NULL); compute_time += TIME_DIFF(start, stop); } cudaMemcpy(cpu_buffer, y, 
sizeof(data_t) * mat.global_m, cudaMemcpyDeviceToHost); printf(INDENT"Checking.\n"); ret = check_answer(&mat, argv[2], cpu_buffer); if(ret == 0) { printf("\e[1;32m"INDENT"Result validated.\e[0m\n"); } else { fprintf(stderr, "\e[1;31m"INDENT"Result NOT validated.\e[0m\n"); MY_ABORT(ret); } destroy_dist_matrix(&mat); free(cpu_buffer); cudaFree(y); cudaFree(x); gflops = 2e-9 * mat.global_nnz * reps / compute_time; compute_time_per_run = compute_time / reps; printf(INDENT INDENT"preprocess time = %lf s, compute time = %lf s per run\n", \ pre_time, compute_time_per_run); printf("\e[1;34m"INDENT INDENT"Performance: %lf Gflop/s\e[0m\n", gflops); return 0; } void print_help(const char *argv0, int p_id) { if(p_id == 0) { printf("\e[1;31mUSAGE: %s <repetitions> <input-file>\e[0m\n", argv0); } } int parse_args(int* reps, int p_id, int argc, char **argv) { int r; if(argc < 3) { print_help(argv[0], p_id); return 1; } r = atoi(argv[1]); if(r <= 0) { print_help(argv[0], p_id); return 1; } *reps = r; return SUCCESS; } int my_abort(int line, int code) { fprintf(stderr, "\e[1;33merror at line %d, error code = %d\e[0m\n", line, code); return fatal_error(code); } int fatal_error(int code) { exit(code); return code; }
a5ddc430d26e49de613f7528cb0938b42cc6ac78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cstring> #include <iostream> #include <memory> #include <utility> constexpr auto block_size = 16; bool almost_equal(float a, float b) { return a - b < 0.00001f || b - a < 0.00001f; } class Matrix; void swap(Matrix &left, Matrix &right) noexcept; struct Matrix { size_t row; size_t col; std::unique_ptr<float[]> elements; float *gpu_elements; float get(int row, int col) const { return *(elements.get() + row * this->col + col); } void set(int row, int col, float value) { *(elements.get() + row * this->col + col) = value; } ~Matrix() { if (gpu_elements) hipFree(gpu_elements); } Matrix(size_t row, size_t col) : row(row), col(col) { elements = std::unique_ptr<float[]>(new float[row * col]); } Matrix(const Matrix &other) : row(other.row), col(other.col), elements(new float[other.row * other.col]), gpu_elements(nullptr) { std::memcpy(elements.get(), other.elements.get(), row * col * sizeof(float)); } Matrix &operator=(const Matrix &other) { using std::swap; if (this == &other) { return *this; } Matrix mat(other); swap(*this, mat); return *this; } void swap(Matrix &other) noexcept { using std::swap; swap(row, other.row); swap(col, other.col); swap(elements, other.elements); } }; void swap(Matrix &left, Matrix &right) noexcept { left.swap(right); } void print_matrix(const Matrix &mat) { for (int i = 0; i < mat.row; ++i) { for (int j = 0; j < mat.col; ++j) { std::cout << mat.get(i, j) << ", "; } std::cout << std::endl; } } struct GPUMatrix { size_t row; size_t col; size_t pitch; float *elements; __device__ float get(int row_idx, int col_idx) const { float *row = reinterpret_cast<float *>(reinterpret_cast<char *>(elements) + row_idx * pitch); return row[col_idx]; } __device__ void set(int row_idx, int col_idx, float value) { float *row = reinterpret_cast<float *>(reinterpret_cast<char *>(elements) + row_idx * pitch); row[col_idx] = value; } }; GPUMatrix 
load_to_gpu(Matrix &matrix) { size_t pitch; hipMallocPitch(&matrix.gpu_elements, &pitch, matrix.col * sizeof(float), matrix.row); hipMemcpy2D(matrix.gpu_elements, pitch, matrix.elements.get(), matrix.col * sizeof(float), matrix.col * sizeof(float), matrix.row, hipMemcpyHostToDevice); return GPUMatrix{matrix.row, matrix.col, pitch, matrix.gpu_elements}; } Matrix copy_to_host(GPUMatrix matrix) { Matrix new_matrix(matrix.row, matrix.col); hipError_t err = hipMemcpy2D(new_matrix.elements.get(), matrix.col * sizeof(float), matrix.elements, matrix.pitch, matrix.col * sizeof(float), matrix.row, hipMemcpyDeviceToHost); std::cout << "hipMemcpy2D done: " << hipGetErrorString(err) << std::endl; return new_matrix; } __global__ void matrix_mul_strait(GPUMatrix mat0, GPUMatrix mat1, GPUMatrix result) { int row = blockDim.x * blockIdx.x + threadIdx.x; int col = blockDim.y * blockIdx.y + threadIdx.y; // target index: [row][col] float element = 0; for (int i = 0; i < mat0.col; ++i) { element += mat0.get(row, i) * mat1.get(i, col); } // printf("(%d, %d): %d\n", row, col, int(element)); result.set(row, col, element); } __global__ void matrix_mul_shared(GPUMatrix mat0, GPUMatrix mat1, GPUMatrix result) { const int row_min = blockDim.x * blockIdx.x; const int col_min = blockDim.y * blockIdx.y; const int target_row = blockDim.x * blockIdx.x + threadIdx.x; const int target_col = blockDim.y * blockIdx.y + threadIdx.y; // assign matrix values in range // row: [row_min, row_max), col: [col_min, col_max) // iterate mat0.col / blockDim.x times. 
float c_value = 0.0f; for (int iter = 0, base = 0; iter < int(mat0.col / blockDim.y); ++iter, base += block_size) { __shared__ float mat0_submatrix[block_size][block_size]; __shared__ float mat1_submatrix[block_size][block_size]; mat0_submatrix[threadIdx.x][threadIdx.y] = mat0.get( row_min + threadIdx.x, base + threadIdx.y); mat1_submatrix[threadIdx.x][threadIdx.y] = mat1.get( base + threadIdx.x, col_min + threadIdx.y); __syncthreads(); for (int i = 0; i < block_size; ++i) { c_value += mat0_submatrix[threadIdx.x][i] * mat1_submatrix[i][threadIdx.y]; } __syncthreads(); } result.set(target_row, target_col, c_value); } int main() { Matrix host_matrix_A(64, 128); for (int i = 0; i < host_matrix_A.row; ++i) { for (int j = 0; j < host_matrix_A.col; ++j) { host_matrix_A.set(i, j, 1.0); } } Matrix host_matrix_B(128, 64); for (int i = 0; i < host_matrix_B.row; ++i) { for (int j = 0; j < host_matrix_B.col; ++j) { host_matrix_B.set(i, j, 1.0); } } Matrix host_matrix_C(64, 64); for (int i = 0; i < host_matrix_C.row; ++i) { for (int j = 0; j < host_matrix_C.col; ++j) { host_matrix_C.set(i, j, 0.0); } } GPUMatrix gpu_matrix_A = load_to_gpu(host_matrix_A); GPUMatrix gpu_matrix_B = load_to_gpu(host_matrix_B); GPUMatrix gpu_matrix_C = load_to_gpu(host_matrix_C); dim3 block_dim(16, 16); dim3 grid_dim(64 / 16, 64 / 16); hipLaunchKernelGGL(( matrix_mul_shared), dim3(grid_dim), dim3(block_dim), 0, 0, gpu_matrix_A, gpu_matrix_B, gpu_matrix_C); host_matrix_C = copy_to_host(gpu_matrix_C); print_matrix(host_matrix_C); return 0; }
a5ddc430d26e49de613f7528cb0938b42cc6ac78.cu
#include <algorithm> #include <cstring> #include <iostream> #include <memory> #include <utility> constexpr auto block_size = 16; bool almost_equal(float a, float b) { return a - b < 0.00001f || b - a < 0.00001f; } class Matrix; void swap(Matrix &left, Matrix &right) noexcept; struct Matrix { size_t row; size_t col; std::unique_ptr<float[]> elements; float *gpu_elements; float get(int row, int col) const { return *(elements.get() + row * this->col + col); } void set(int row, int col, float value) { *(elements.get() + row * this->col + col) = value; } ~Matrix() { if (gpu_elements) cudaFree(gpu_elements); } Matrix(size_t row, size_t col) : row(row), col(col) { elements = std::unique_ptr<float[]>(new float[row * col]); } Matrix(const Matrix &other) : row(other.row), col(other.col), elements(new float[other.row * other.col]), gpu_elements(nullptr) { std::memcpy(elements.get(), other.elements.get(), row * col * sizeof(float)); } Matrix &operator=(const Matrix &other) { using std::swap; if (this == &other) { return *this; } Matrix mat(other); swap(*this, mat); return *this; } void swap(Matrix &other) noexcept { using std::swap; swap(row, other.row); swap(col, other.col); swap(elements, other.elements); } }; void swap(Matrix &left, Matrix &right) noexcept { left.swap(right); } void print_matrix(const Matrix &mat) { for (int i = 0; i < mat.row; ++i) { for (int j = 0; j < mat.col; ++j) { std::cout << mat.get(i, j) << ", "; } std::cout << std::endl; } } struct GPUMatrix { size_t row; size_t col; size_t pitch; float *elements; __device__ float get(int row_idx, int col_idx) const { float *row = reinterpret_cast<float *>(reinterpret_cast<char *>(elements) + row_idx * pitch); return row[col_idx]; } __device__ void set(int row_idx, int col_idx, float value) { float *row = reinterpret_cast<float *>(reinterpret_cast<char *>(elements) + row_idx * pitch); row[col_idx] = value; } }; GPUMatrix load_to_gpu(Matrix &matrix) { size_t pitch; cudaMallocPitch(&matrix.gpu_elements, &pitch, 
matrix.col * sizeof(float), matrix.row); cudaMemcpy2D(matrix.gpu_elements, pitch, matrix.elements.get(), matrix.col * sizeof(float), matrix.col * sizeof(float), matrix.row, cudaMemcpyHostToDevice); return GPUMatrix{matrix.row, matrix.col, pitch, matrix.gpu_elements}; } Matrix copy_to_host(GPUMatrix matrix) { Matrix new_matrix(matrix.row, matrix.col); cudaError_t err = cudaMemcpy2D(new_matrix.elements.get(), matrix.col * sizeof(float), matrix.elements, matrix.pitch, matrix.col * sizeof(float), matrix.row, cudaMemcpyDeviceToHost); std::cout << "cudaMemcpy2D done: " << cudaGetErrorString(err) << std::endl; return new_matrix; } __global__ void matrix_mul_strait(GPUMatrix mat0, GPUMatrix mat1, GPUMatrix result) { int row = blockDim.x * blockIdx.x + threadIdx.x; int col = blockDim.y * blockIdx.y + threadIdx.y; // target index: [row][col] float element = 0; for (int i = 0; i < mat0.col; ++i) { element += mat0.get(row, i) * mat1.get(i, col); } // printf("(%d, %d): %d\n", row, col, int(element)); result.set(row, col, element); } __global__ void matrix_mul_shared(GPUMatrix mat0, GPUMatrix mat1, GPUMatrix result) { const int row_min = blockDim.x * blockIdx.x; const int col_min = blockDim.y * blockIdx.y; const int target_row = blockDim.x * blockIdx.x + threadIdx.x; const int target_col = blockDim.y * blockIdx.y + threadIdx.y; // assign matrix values in range // row: [row_min, row_max), col: [col_min, col_max) // iterate mat0.col / blockDim.x times. 
float c_value = 0.0f; for (int iter = 0, base = 0; iter < int(mat0.col / blockDim.y); ++iter, base += block_size) { __shared__ float mat0_submatrix[block_size][block_size]; __shared__ float mat1_submatrix[block_size][block_size]; mat0_submatrix[threadIdx.x][threadIdx.y] = mat0.get( row_min + threadIdx.x, base + threadIdx.y); mat1_submatrix[threadIdx.x][threadIdx.y] = mat1.get( base + threadIdx.x, col_min + threadIdx.y); __syncthreads(); for (int i = 0; i < block_size; ++i) { c_value += mat0_submatrix[threadIdx.x][i] * mat1_submatrix[i][threadIdx.y]; } __syncthreads(); } result.set(target_row, target_col, c_value); } int main() { Matrix host_matrix_A(64, 128); for (int i = 0; i < host_matrix_A.row; ++i) { for (int j = 0; j < host_matrix_A.col; ++j) { host_matrix_A.set(i, j, 1.0); } } Matrix host_matrix_B(128, 64); for (int i = 0; i < host_matrix_B.row; ++i) { for (int j = 0; j < host_matrix_B.col; ++j) { host_matrix_B.set(i, j, 1.0); } } Matrix host_matrix_C(64, 64); for (int i = 0; i < host_matrix_C.row; ++i) { for (int j = 0; j < host_matrix_C.col; ++j) { host_matrix_C.set(i, j, 0.0); } } GPUMatrix gpu_matrix_A = load_to_gpu(host_matrix_A); GPUMatrix gpu_matrix_B = load_to_gpu(host_matrix_B); GPUMatrix gpu_matrix_C = load_to_gpu(host_matrix_C); dim3 block_dim(16, 16); dim3 grid_dim(64 / 16, 64 / 16); matrix_mul_shared<<<grid_dim, block_dim>>>(gpu_matrix_A, gpu_matrix_B, gpu_matrix_C); host_matrix_C = copy_to_host(gpu_matrix_C); print_matrix(host_matrix_C); return 0; }
fa204d235a588b9e6f8891e7ca151e6d9b040a84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // CDP_LU.CU // // Test infrastructure is here. Kernels are each included in separate files. // #include <stdio.h> //#include <omp.h> #include "cdp_lu.h" #include "cdp_lu_utils.h" extern __global__ void dgetrf_cdpentry(Parameters *device_params); // Entry point for dgetrf. We allocate memories and simply call the kernel. void dgetrf_test(Parameters *host_params, Parameters *device_params) { double t_start = time_in_seconds(); // Launch the kernel (just a device-function call in CDP terms) hipLaunchKernelGGL(( dgetrf_cdpentry), dim3(1), dim3(1) , 0, 0, device_params); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Failed to launch CDP kernel (%s)\nCalling exit...\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("Successfully launched CDP kernel\n"); } checkCudaErrors(hipDeviceSynchronize()); double gpu_sec = time_in_seconds() - t_start; // Check our return data /* for(int b=0; b<batch; b++) { if(*(params[b].hostmem.info) != 0) printf("Degenerate matrix %d/%d.\n", b+1, batch); } */ double flop_count = (double) host_params->flop_count; printf("GPU perf(dgetrf)= %.3f Gflops\n", flop_count / (1000000000.*gpu_sec)); }
fa204d235a588b9e6f8891e7ca151e6d9b040a84.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // CDP_LU.CU // // Test infrastructure is here. Kernels are each included in separate files. // #include <stdio.h> //#include <omp.h> #include "cdp_lu.h" #include "cdp_lu_utils.h" extern __global__ void dgetrf_cdpentry(Parameters *device_params); // Entry point for dgetrf. We allocate memories and simply call the kernel. void dgetrf_test(Parameters *host_params, Parameters *device_params) { double t_start = time_in_seconds(); // Launch the kernel (just a device-function call in CDP terms) dgetrf_cdpentry<<< 1, 1 >>>(device_params); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Failed to launch CDP kernel (%s)\nCalling exit...\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } else { printf("Successfully launched CDP kernel\n"); } checkCudaErrors(cudaDeviceSynchronize()); double gpu_sec = time_in_seconds() - t_start; // Check our return data /* for(int b=0; b<batch; b++) { if(*(params[b].hostmem.info) != 0) printf("Degenerate matrix %d/%d.\n", b+1, batch); } */ double flop_count = (double) host_params->flop_count; printf("GPU perf(dgetrf)= %.3f Gflops\n", flop_count / (1000000000.*gpu_sec)); }
dcc384e6a7cdc0bcb11703a75b12866496e20600.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime_api.h" #include <stdio.h> hipError_t flip8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) { for (int i = 0; i < xSize * ySize; i++) { if (GoldInput[i] != CudaInput[i]) { printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]); return(1); } } return(0); } void flip8WithC(unsigned char* in, unsigned char* out, int xSize, int ySize) { size_t x, y; for (y = 0; y < ySize; y += 1) { for (x = 0; x < xSize; x += 1) { size_t in_pos = y * xSize + x; size_t out_pos = (ySize - y - 1) * xSize + x; *(out + out_pos) = *(in + in_pos); } } } __global__ void kernelFlipY8(unsigned char* input, unsigned char* output, int size) { int xWidth = blockDim.x * gridDim.x; int yWidth = blockDim.y * gridDim.y; int xLoc = (blockIdx.x * blockDim.x + threadIdx.x); int yLoc = blockIdx.y * blockDim.y + threadIdx.y; int index = xLoc + yLoc * xWidth; if (index < (size)) { output[(yWidth - yLoc - 1) * xWidth + xLoc] = input[index]; } } int main() { unsigned char* input, * CudaOutput, * GoldOutput; int xSize, ySize; xSize = 512; ySize = 512; input = new unsigned char[xSize * ySize]; CudaOutput = new unsigned char[xSize * ySize]; GoldOutput = new unsigned char[xSize * ySize]; int i, j; printf("xSize=%d ySize=%d \n", xSize, ySize); FILE* fp; //fp = fopen("barbara_gray.raw", "rb"); //fread(input, xSize, ySize, fp); for (int i = 0; i < ySize; i++) for (int j = 0; j < xSize; j++) input[i * xSize + j] = (i * j) % 255; flip8WithC(input, GoldOutput, xSize, ySize); // Add vectors in parallel. 
hipError_t cudaStatus = flip8WithCuda(input, CudaOutput, xSize, ySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "invert8WithCuda failed!"); return 1; } int error = verify(GoldOutput, CudaOutput, xSize, ySize); if (error != 0) printf("Verify Failed \n"); else printf("Verify Successful \n"); fp = fopen("COutput.raw", "wb"); fwrite(GoldOutput, xSize, ySize, fp); fclose(fp); fp = fopen("CudaOutput.raw", "wb"); fwrite(CudaOutput, xSize, ySize, fp); fclose(fp); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } delete[] CudaOutput; delete[] GoldOutput; delete[] input; return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t flip8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize) { unsigned char* dev_input = 0; unsigned char* dev_output = 0; // hipProfilerInitialize(); unsigned int xysize = xSize * ySize; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. hipDeviceProp_t prop; int count; dim3 blocks, threads; threads.x = 512; threads.y = 1; //512x512 : along X 512/8 = 64 thread blocks Alon gY 64 blocks blocks.x = (xSize + threads.x - 1) / (threads.x); //1 blocks.y = (ySize + threads.y - 1) / (threads.y); //512 printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y); printf("threads.x = %d threads.y=%d \n", threads.x, threads.y); hipGetDeviceCount(&count); printf("Count = %d\n", count); cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } hipEventRecord(start, 0); // Allocate GPU buffers for two input . 
cudaStatus = hipMalloc((void**)&dev_input, xysize * sizeof(char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_output, xysize * sizeof(char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_input, input, xysize, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } hipProfilerStart(); // Launch a kernel on the GPU with one thread for each element. kernelFlipY8 __dim__(blocks, threads) (dev_input, dev_output, xysize); hipProfilerStop(); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching invert8Kernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(output, dev_output, xysize * sizeof(char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } hipEventRecord(stop, 0); hipEventSynchronize(stop); float cudaElapsedTime; hipEventElapsedTime(&cudaElapsedTime, start, stop); printf("Time for execution = %3.1f ms \n", cudaElapsedTime); hipEventDestroy(start); hipEventDestroy(stop); Error: hipFree(dev_input); hipFree(dev_output); return cudaStatus; }
dcc384e6a7cdc0bcb11703a75b12866496e20600.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_profiler_api.h" #include <stdio.h> cudaError_t flip8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* input, unsigned char* output, int xSize, int ySize); int verify(unsigned char* GoldInput, unsigned char* CudaInput, int xSize, int ySize) { for (int i = 0; i < xSize * ySize; i++) { if (GoldInput[i] != CudaInput[i]) { printf("GoldInput[%d] = %d CInput[%d]=%d \n", i, GoldInput[i], i, CudaInput[i]); return(1); } } return(0); } void flip8WithC(unsigned char* in, unsigned char* out, int xSize, int ySize) { size_t x, y; for (y = 0; y < ySize; y += 1) { for (x = 0; x < xSize; x += 1) { size_t in_pos = y * xSize + x; size_t out_pos = (ySize - y - 1) * xSize + x; *(out + out_pos) = *(in + in_pos); } } } __global__ void kernelFlipY8(unsigned char* input, unsigned char* output, int size) { int xWidth = blockDim.x * gridDim.x; int yWidth = blockDim.y * gridDim.y; int xLoc = (blockIdx.x * blockDim.x + threadIdx.x); int yLoc = blockIdx.y * blockDim.y + threadIdx.y; int index = xLoc + yLoc * xWidth; if (index < (size)) { output[(yWidth - yLoc - 1) * xWidth + xLoc] = input[index]; } } int main() { unsigned char* input, * CudaOutput, * GoldOutput; int xSize, ySize; xSize = 512; ySize = 512; input = new unsigned char[xSize * ySize]; CudaOutput = new unsigned char[xSize * ySize]; GoldOutput = new unsigned char[xSize * ySize]; int i, j; printf("xSize=%d ySize=%d \n", xSize, ySize); FILE* fp; //fp = fopen("barbara_gray.raw", "rb"); //fread(input, xSize, ySize, fp); for (int i = 0; i < ySize; i++) for (int j = 0; j < xSize; j++) input[i * xSize + j] = (i * j) % 255; flip8WithC(input, GoldOutput, xSize, ySize); // Add vectors in parallel. 
cudaError_t cudaStatus = flip8WithCuda(input, CudaOutput, xSize, ySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "invert8WithCuda failed!"); return 1; } int error = verify(GoldOutput, CudaOutput, xSize, ySize); if (error != 0) printf("Verify Failed \n"); else printf("Verify Successful \n"); fp = fopen("COutput.raw", "wb"); fwrite(GoldOutput, xSize, ySize, fp); fclose(fp); fp = fopen("CudaOutput.raw", "wb"); fwrite(CudaOutput, xSize, ySize, fp); fclose(fp); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } delete[] CudaOutput; delete[] GoldOutput; delete[] input; return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t flip8WithCuda(unsigned char* input, unsigned char* output, int xSize, int ySize) { unsigned char* dev_input = 0; unsigned char* dev_output = 0; // cudaProfilerInitialize(); unsigned int xysize = xSize * ySize; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaDeviceProp prop; int count; dim3 blocks, threads; threads.x = 512; threads.y = 1; //512x512 : along X 512/8 = 64 thread blocks Alon gY 64 blocks blocks.x = (xSize + threads.x - 1) / (threads.x); //1 blocks.y = (ySize + threads.y - 1) / (threads.y); //512 printf("blocks.x = %d blocks.y=%d \n", blocks.x, blocks.y); printf("threads.x = %d threads.y=%d \n", threads.x, threads.y); cudaGetDeviceCount(&count); printf("Count = %d\n", count); cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } cudaEventRecord(start, 0); // Allocate GPU buffers for two input . 
cudaStatus = cudaMalloc((void**)&dev_input, xysize * sizeof(char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_output, xysize * sizeof(char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_input, input, xysize, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaProfilerStart(); // Launch a kernel on the GPU with one thread for each element. kernelFlipY8 __dim__(blocks, threads) (dev_input, dev_output, xysize); cudaProfilerStop(); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching invert8Kernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(output, dev_output, xysize * sizeof(char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float cudaElapsedTime; cudaEventElapsedTime(&cudaElapsedTime, start, stop); printf("Time for execution = %3.1f ms \n", cudaElapsedTime); cudaEventDestroy(start); cudaEventDestroy(stop); Error: cudaFree(dev_input); cudaFree(dev_output); return cudaStatus; }
7015ee84dcf921965fdc852654eab49b9818c734.hip
// !!! This is a file automatically generated by hipify!!! #include <utilities/cnn_derivative_test.h> #include <device/cuda_utils.h> #include <device/device_defines.h> #include <device/gen_random.h> #include <device/handles.h> #include <core/datadefs.h> #include <core/errors.h> #include <functions/cnn_gradient.h> #include <functions/cnn_hessian_vec.h> #include <functions/cnn_eval_model.h> #include <functions/dev_initializations.h> #include <utilities/print_utils.h> #include <utilities/utils.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> /* (W,b) = 0. (Wr, br) = random point. (Wr', br') = random point. f(W,b) = model_eval @ (W,b) g(W,b) = gradient_eval @ (W,b) for (N points){ (Wc, bc) = (W,b) + (Wr', br'); f(Wc,bc) = model_eval @ (Wc, bc) f_error = f(Wc,bc) - { f(W,b) + g(W,b)*(Wr',br') + (1/2)*(Wr',br') * h(W,b) * (Wr',br') } //compute first order error //compute second order error. (Wr', br') = (Wr', br') / 2 } */ void cnnDerivativeTest( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { int numPoints = 25; real *devPtr = scratch->nextDevPtr; real *hostPtr = scratch->nextHostPtr; real *pagedMem = scratch->pageLckWorkspace; //device space real *W0 = devPtr; real *Wr = W0 + model->pSize; real *Wc = Wr + model->pSize; //Gradient Space real *z = Wc + model->pSize; real *dx = z + model->zSize; real *gradient = dx + model->zSize; //hv Space real *lossFuncErrors = gradient + model->pSize; real *rz = lossFuncErrors + model->maxDeltaSize; real *rerror = rz + model->zSize; real *probs = rerror + model->maxDeltaSize; real *Hv = probs + model->pSize; //Scratch if needed. 
real *nextDevPtr = Hv + model->pSize; //host space real *d2error = hostPtr; real *d3error = d2error + numPoints; real *dx2 = d3error + numPoints; real *dx3 = dx2 + numPoints; real *dxs = dx3 + numPoints; real *nextHostPtr = dxs + numPoints; //page space real *ll0 = pagedMem; real *llc = ll0 + 1; real *modelErr0 = llc + 1; real *modelErrC= modelErr0 + 1; real *firstOrderTerm = modelErrC+ 1; real *firstOrderTermCum = firstOrderTerm + 1; real *secondOrderTerm = firstOrderTermCum + 1; real *secondOrderTermCum = secondOrderTerm + 1; real *nextPageLckPtr = secondOrderTermCum + 1; //auxilaries here. int vecSize = model->pSize; real alpha = 1; real discard; int offset, numSamples; real start, total; //reset the scratch area scratch->nextDevPtr = nextDevPtr; scratch->nextHostPtr = nextHostPtr; scratch->nextPageLckPtr = nextPageLckPtr; //Zero Point cuda_memset( W0, 0, sizeof(real) * vecSize, ERROR_MEMSET ); cuda_memset( Wc, 0, sizeof(real) * vecSize, ERROR_MEMSET ); //Random Point getRandomVector( vecSize, NULL, Wr, RAND_UNIFORM ); getRandomVector( vecSize, NULL, W0, RAND_UNIFORM ); alpha = 0.1; cublasCheckError( hipblasDscal( cublasHandle, vecSize, &alpha, Wr, 1 ) ); cublasCheckError( hipblasDscal( cublasHandle, vecSize, &alpha, W0, 1 ) ); fprintf( stderr, "Initialized Random Points... \n"); memset( d2error, 0, sizeof(real) * numPoints ); memset( d3error, 0, sizeof(real) * numPoints ); fprintf( stderr, "Begin .... 
\n"); for (int i = 0; i < numPoints; i ++) { offset = 0; numSamples = 0; *modelErr0 = *modelErrC = 0; *firstOrderTermCum = *secondOrderTermCum = 0; start = total = 0; start = Get_Time (); for (int j = 0; j < (data->trainSizeX); j += model->batchSize){ offset = j; if( (j + model->batchSize) <= data->trainSizeX ) numSamples = model->batchSize; else numSamples = data->trainSizeX % model->batchSize; //f0, g0 /* copy_device( data->weights, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErr0 += computeCNNGradient( model, data, scratch, z, dx, probs, lossFuncErrors, gradient, offset, numSamples); */ //add two points Wc = W0 + Wr copy_device( Wc, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); alpha = 1; cublasCheckError( hipblasDaxpy( cublasHandle, vecSize, &alpha, Wr, 1, //x Wc, 1 ) ); //y //f(c) copy_device( data->weights, Wc, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErrC += evaluateCNNModel( model, data, scratch, z, probs, lossFuncErrors, offset, numSamples); //second order term computation //hessian * Wc //result is stored in the nextDevPtr copy_device( data->weights, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErr0 += computeCNNGradient( model, data, scratch, z, dx, probs, lossFuncErrors, gradient, offset, numSamples, 0); cnnHv( model, data, z, probs, lossFuncErrors, dx, Wr, Hv, offset, numSamples, nextDevPtr, nextHostPtr, 0 ); //fprintf( stderr, "Offset: %d, samples: %d, j: %d, Err0: %g, ErrC: %g \n", // offset * 3 * 1024, numSamples, j, *modelErr0, *modelErrC ); //Wr * gradient cublasCheckError( hipblasDdot( cublasHandle, vecSize, Wr, 1, gradient, 1, firstOrderTerm ) ); *firstOrderTermCum += *firstOrderTerm; //Wr * (hessian * Wr ) cublasCheckError( hipblasDdot( cublasHandle, vecSize, Hv, 1, Wr, 1, secondOrderTerm ) ); *secondOrderTermCum += *secondOrderTerm; } fprintf( stderr, "\n"); total = Get_Timing_Info( start ); //Normalize by the size of the dataset. 
*modelErrC /= (double)data->trainSizeX; *modelErr0 /= (double)data->trainSizeX; *firstOrderTermCum /= (double)data->trainSizeX; *secondOrderTermCum /= (double)data->trainSizeX; d2error[ i ] = fabs( (*modelErrC) - ((*modelErr0) + (*firstOrderTermCum))) / fabs(*modelErrC); d3error[ i ] = fabs( ( *modelErrC ) - ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum) )) / fabs( *modelErrC ); fprintf( stderr, "First ORder Error: %e, %g \n", *firstOrderTermCum, *firstOrderTermCum ); fprintf( stderr, "Second ORder Error: %e, %g, %g \n", *secondOrderTermCum, *secondOrderTermCum, 0.5 * (*secondOrderTermCum) ); fprintf( stderr, "f == %g, f0 == %g, firstOrderErr: %g, secondOrderErr: %g\n", (*modelErrC), (*modelErr0), d2error[i], d3error[i] ); fprintf( stderr, " dg == %g, dHd == %g, estimated val == %g, error == %g \n", *firstOrderTermCum, 0.5 * (*secondOrderTermCum), ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum)), (*modelErrC) - ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum)) ) ; cublasCheckError( hipblasDnrm2 ( cublasHandle, vecSize, Wr, 1, nextPageLckPtr ) ); dx2[ i ] = pow( *nextPageLckPtr, 2.); dx3[ i ] = pow( *nextPageLckPtr, 3.); dxs[ i ] = *nextPageLckPtr; //Wr = Wr / 2 alpha = 0.5; cublasCheckError( hipblasDscal( cublasHandle, vecSize, &alpha, Wr, 1 ) ); fprintf( stderr, "Done with ....... %d in %f seconds \n\n\n", i, total ); } //write the results in a file here. writeVector( d2error, numPoints, "./d2_errors.txt", 1, d2error ); writeVector( d3error, numPoints, "./d3_errors.txt", 1, d3error ); writeVector( dx2, numPoints, "./dx2_order.txt", 1, dx2 ); writeVector( dx3, numPoints, "./dx3_order.txt", 1, dx3 ); writeVector( dxs, numPoints, "./dxs.txt", 1, dx3 ); }
7015ee84dcf921965fdc852654eab49b9818c734.cu
#include <utilities/cnn_derivative_test.h> #include <device/cuda_utils.h> #include <device/device_defines.h> #include <device/gen_random.h> #include <device/handles.h> #include <core/datadefs.h> #include <core/errors.h> #include <functions/cnn_gradient.h> #include <functions/cnn_hessian_vec.h> #include <functions/cnn_eval_model.h> #include <functions/dev_initializations.h> #include <utilities/print_utils.h> #include <utilities/utils.h> #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas_v2.h> /* (W,b) = 0. (Wr, br) = random point. (Wr', br') = random point. f(W,b) = model_eval @ (W,b) g(W,b) = gradient_eval @ (W,b) for (N points){ (Wc, bc) = (W,b) + (Wr', br'); f(Wc,bc) = model_eval @ (Wc, bc) f_error = f(Wc,bc) - { f(W,b) + g(W,b)*(Wr',br') + (1/2)*(Wr',br') * h(W,b) * (Wr',br') } //compute first order error //compute second order error. (Wr', br') = (Wr', br') / 2 } */ void cnnDerivativeTest( CNN_MODEL *model, DEVICE_DATASET *data, SCRATCH_AREA *scratch ) { int numPoints = 25; real *devPtr = scratch->nextDevPtr; real *hostPtr = scratch->nextHostPtr; real *pagedMem = scratch->pageLckWorkspace; //device space real *W0 = devPtr; real *Wr = W0 + model->pSize; real *Wc = Wr + model->pSize; //Gradient Space real *z = Wc + model->pSize; real *dx = z + model->zSize; real *gradient = dx + model->zSize; //hv Space real *lossFuncErrors = gradient + model->pSize; real *rz = lossFuncErrors + model->maxDeltaSize; real *rerror = rz + model->zSize; real *probs = rerror + model->maxDeltaSize; real *Hv = probs + model->pSize; //Scratch if needed. 
real *nextDevPtr = Hv + model->pSize; //host space real *d2error = hostPtr; real *d3error = d2error + numPoints; real *dx2 = d3error + numPoints; real *dx3 = dx2 + numPoints; real *dxs = dx3 + numPoints; real *nextHostPtr = dxs + numPoints; //page space real *ll0 = pagedMem; real *llc = ll0 + 1; real *modelErr0 = llc + 1; real *modelErrC= modelErr0 + 1; real *firstOrderTerm = modelErrC+ 1; real *firstOrderTermCum = firstOrderTerm + 1; real *secondOrderTerm = firstOrderTermCum + 1; real *secondOrderTermCum = secondOrderTerm + 1; real *nextPageLckPtr = secondOrderTermCum + 1; //auxilaries here. int vecSize = model->pSize; real alpha = 1; real discard; int offset, numSamples; real start, total; //reset the scratch area scratch->nextDevPtr = nextDevPtr; scratch->nextHostPtr = nextHostPtr; scratch->nextPageLckPtr = nextPageLckPtr; //Zero Point cuda_memset( W0, 0, sizeof(real) * vecSize, ERROR_MEMSET ); cuda_memset( Wc, 0, sizeof(real) * vecSize, ERROR_MEMSET ); //Random Point getRandomVector( vecSize, NULL, Wr, RAND_UNIFORM ); getRandomVector( vecSize, NULL, W0, RAND_UNIFORM ); alpha = 0.1; cublasCheckError( cublasDscal( cublasHandle, vecSize, &alpha, Wr, 1 ) ); cublasCheckError( cublasDscal( cublasHandle, vecSize, &alpha, W0, 1 ) ); fprintf( stderr, "Initialized Random Points... \n"); memset( d2error, 0, sizeof(real) * numPoints ); memset( d3error, 0, sizeof(real) * numPoints ); fprintf( stderr, "Begin .... 
\n"); for (int i = 0; i < numPoints; i ++) { offset = 0; numSamples = 0; *modelErr0 = *modelErrC = 0; *firstOrderTermCum = *secondOrderTermCum = 0; start = total = 0; start = Get_Time (); for (int j = 0; j < (data->trainSizeX); j += model->batchSize){ offset = j; if( (j + model->batchSize) <= data->trainSizeX ) numSamples = model->batchSize; else numSamples = data->trainSizeX % model->batchSize; //f0, g0 /* copy_device( data->weights, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErr0 += computeCNNGradient( model, data, scratch, z, dx, probs, lossFuncErrors, gradient, offset, numSamples); */ //add two points Wc = W0 + Wr copy_device( Wc, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); alpha = 1; cublasCheckError( cublasDaxpy( cublasHandle, vecSize, &alpha, Wr, 1, //x Wc, 1 ) ); //y //f(c) copy_device( data->weights, Wc, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErrC += evaluateCNNModel( model, data, scratch, z, probs, lossFuncErrors, offset, numSamples); //second order term computation //hessian * Wc //result is stored in the nextDevPtr copy_device( data->weights, W0, sizeof(real) * vecSize, ERROR_MEMCPY_DEVICE_DEVICE ); *modelErr0 += computeCNNGradient( model, data, scratch, z, dx, probs, lossFuncErrors, gradient, offset, numSamples, 0); cnnHv( model, data, z, probs, lossFuncErrors, dx, Wr, Hv, offset, numSamples, nextDevPtr, nextHostPtr, 0 ); //fprintf( stderr, "Offset: %d, samples: %d, j: %d, Err0: %g, ErrC: %g \n", // offset * 3 * 1024, numSamples, j, *modelErr0, *modelErrC ); //Wr * gradient cublasCheckError( cublasDdot( cublasHandle, vecSize, Wr, 1, gradient, 1, firstOrderTerm ) ); *firstOrderTermCum += *firstOrderTerm; //Wr * (hessian * Wr ) cublasCheckError( cublasDdot( cublasHandle, vecSize, Hv, 1, Wr, 1, secondOrderTerm ) ); *secondOrderTermCum += *secondOrderTerm; } fprintf( stderr, "\n"); total = Get_Timing_Info( start ); //Normalize by the size of the dataset. 
*modelErrC /= (double)data->trainSizeX; *modelErr0 /= (double)data->trainSizeX; *firstOrderTermCum /= (double)data->trainSizeX; *secondOrderTermCum /= (double)data->trainSizeX; d2error[ i ] = fabs( (*modelErrC) - ((*modelErr0) + (*firstOrderTermCum))) / fabs(*modelErrC); d3error[ i ] = fabs( ( *modelErrC ) - ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum) )) / fabs( *modelErrC ); fprintf( stderr, "First ORder Error: %e, %g \n", *firstOrderTermCum, *firstOrderTermCum ); fprintf( stderr, "Second ORder Error: %e, %g, %g \n", *secondOrderTermCum, *secondOrderTermCum, 0.5 * (*secondOrderTermCum) ); fprintf( stderr, "f == %g, f0 == %g, firstOrderErr: %g, secondOrderErr: %g\n", (*modelErrC), (*modelErr0), d2error[i], d3error[i] ); fprintf( stderr, " dg == %g, dHd == %g, estimated val == %g, error == %g \n", *firstOrderTermCum, 0.5 * (*secondOrderTermCum), ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum)), (*modelErrC) - ((*modelErr0) + (*firstOrderTermCum) + 0.5 * (*secondOrderTermCum)) ) ; cublasCheckError( cublasDnrm2 ( cublasHandle, vecSize, Wr, 1, nextPageLckPtr ) ); dx2[ i ] = pow( *nextPageLckPtr, 2.); dx3[ i ] = pow( *nextPageLckPtr, 3.); dxs[ i ] = *nextPageLckPtr; //Wr = Wr / 2 alpha = 0.5; cublasCheckError( cublasDscal( cublasHandle, vecSize, &alpha, Wr, 1 ) ); fprintf( stderr, "Done with ....... %d in %f seconds \n\n\n", i, total ); } //write the results in a file here. writeVector( d2error, numPoints, "./d2_errors.txt", 1, d2error ); writeVector( d3error, numPoints, "./d3_errors.txt", 1, d3error ); writeVector( dx2, numPoints, "./dx2_order.txt", 1, dx2 ); writeVector( dx3, numPoints, "./dx3_order.txt", 1, dx3 ); writeVector( dxs, numPoints, "./dxs.txt", 1, dx3 ); }
79dfe9d12ea35ef650b1f5c6da52f3dd6046e7fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, problme de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diffrents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; // TODO pattern entrelacement int i; int j; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
79dfe9d12ea35ef650b1f5c6da52f3dd6046e7fb.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, probl�me de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diff�rents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; // TODO pattern entrelacement int i; int j; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
beeca8d0d779e87630a6a7ed4c77deb037de8c34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ #include "common_magma.h" #define magmablas_dgemv_fermi magmablas_dgemv #define dgemv_bs 64 #define threadSize 256 __global__ void dgemvn_kernel_fermi( int m, int n, int n1, double alpha, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { int ind = blockIdx.x*dgemv_bs + threadIdx.x; if(ind < m) { A += ind; } double res = 0.0; __shared__ double buff[dgemv_bs]; for(int i=0; i<n1; i += dgemv_bs ){ __syncthreads(); buff[threadIdx.x] = x[(threadIdx.x + i) * incx]; __syncthreads(); #pragma unroll for(int j=0; j < dgemv_bs ; j++){ res+=A[0]*buff[j]; A+=lda; } } __syncthreads(); if(ind < m) { if (n>n1) { for(int j=0; j<(n-n1); j++){ res += A[0] * x[(n1+j) * incx]; A+=lda; } } } if (ind<m) y[ind*incy] = alpha * res + beta * y[ind*incy]; } extern "C" void magmablas_dgemvn_fermi( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *y, magma_int_t incy) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes y = alpha A x on the GPU. M - (input) INTEGER. On entry, N specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, M specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, m ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Y - (output) DOUBLE PRECISION array of dimension m. On exit Y = alpha A X. 
===================================================================== */ magma_int_t blocks; if (m % dgemv_bs==0) blocks = m/dgemv_bs; else blocks = m/dgemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(dgemv_bs, 1, 1); hipLaunchKernelGGL(( dgemvn_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, (n/ dgemv_bs)*dgemv_bs, alpha, A, lda, x, incx, beta, y, incy); } __global__ void dgemvt_kernel_fermi( int m, int n, double alpha, int n1, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { int tx = threadIdx.x; __shared__ double sdata[threadSize]; double res; res = 0.0; for(int i=0; i<n1; i+= threadSize) { res += A[tx + i + lda * blockIdx.y] * x[(tx + i)*incx]; } if(m > n1) { if( tx + n1 < m ) { res += A[tx + n1 + lda *blockIdx.y] * x[(tx + n1)*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(tx<32) { sdata[tx] += sdata[tx+32]; } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } } if( tx == 0 ) { if (blockIdx.y < n) { y[blockIdx.y*incy] = sdata[0] * alpha + beta * y[blockIdx.y*incy]; } } } extern "C" void magmablas_dgemvt_fermi( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *y, magma_int_t incy) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes y = alpha A^t x on the GPU. M - (input) INTEGER. On entry, m specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, n specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Y - (output) DOUBLE PRECISION array of dimension n. 
On exit y = alpha A^t X. ===================================================================== */ dim3 grid ( 1, n, 1); dim3 threads ( threadSize, 1, 1); hipLaunchKernelGGL(( dgemvt_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, ( m / threadSize) * threadSize, A, lda, x, incx, beta, y, incy); } extern "C" void magmablas_dgemv_fermi(char trans, magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *z, magma_int_t incz) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes: 1) z = A x if trans == 'N' or 'n', alpha == 1, beta == 0, and incx == incz == 1 (using magmablas code) 2) z = alpha A^t x if trans == 'T' or 't', beta == 0, and incx == incz == 1 (using magmablas code) 3) z = alpha A^trans x + beta z otherwise, using CUBLAS. Arguments ========== TRANS - CHARACTER*1 On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' z := alpha*A *x + beta*z TRANS = 'T' or 't' z := alpha*A'*x + beta*z M - (input) INTEGER On entry, m specifies the number of rows of the matrix A. N - (input) INTEGER On entry, n specifies the number of columns of the matrix A ALPHA - DOUBLE REAL On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension n if trans == 'n' m if trans == 't' INCX - (input) Specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. 
Z - (output) DOUBLE PRECISION array of dimension m if trans == 'n' n if trans == 't' INCZ - (input) Specifies the increment for the elements of Z. INCZ must not be zero. Unchanged on exit. ===================================================================== */ //if (incx == 1 && incz == 1) { if (trans == 'n' || trans == 'N') { if ( m >= 7000 && m <= 8000 ) hipblasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); else magmablas_dgemvn_fermi(m, n, alpha, A, lda, x, incx, beta, z, incz); } else if (trans == 't' || trans == 'T') magmablas_dgemvt_fermi(m, n, alpha, A, lda, x, incx, beta, z, incz); else printf("trans = %c in sgemv_fermi is not available\n", trans); } // else // hipblasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); } #undef dgemv_bs #undef threadSize
beeca8d0d779e87630a6a7ed4c77deb037de8c34.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ #include "common_magma.h" #define magmablas_dgemv_fermi magmablas_dgemv #define dgemv_bs 64 #define threadSize 256 __global__ void dgemvn_kernel_fermi( int m, int n, int n1, double alpha, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { int ind = blockIdx.x*dgemv_bs + threadIdx.x; if(ind < m) { A += ind; } double res = 0.0; __shared__ double buff[dgemv_bs]; for(int i=0; i<n1; i += dgemv_bs ){ __syncthreads(); buff[threadIdx.x] = x[(threadIdx.x + i) * incx]; __syncthreads(); #pragma unroll for(int j=0; j < dgemv_bs ; j++){ res+=A[0]*buff[j]; A+=lda; } } __syncthreads(); if(ind < m) { if (n>n1) { for(int j=0; j<(n-n1); j++){ res += A[0] * x[(n1+j) * incx]; A+=lda; } } } if (ind<m) y[ind*incy] = alpha * res + beta * y[ind*incy]; } extern "C" void magmablas_dgemvn_fermi( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *y, magma_int_t incy) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes y = alpha A x on the GPU. M - (input) INTEGER. On entry, N specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, M specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, m ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Y - (output) DOUBLE PRECISION array of dimension m. On exit Y = alpha A X. 
===================================================================== */ magma_int_t blocks; if (m % dgemv_bs==0) blocks = m/dgemv_bs; else blocks = m/dgemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(dgemv_bs, 1, 1); dgemvn_kernel_fermi<<< grid, threads, 0, magma_stream >>>(m, n, (n/ dgemv_bs)*dgemv_bs, alpha, A, lda, x, incx, beta, y, incy); } __global__ void dgemvt_kernel_fermi( int m, int n, double alpha, int n1, const double *A, int lda, const double *x, int incx, double beta, double *y, int incy) { int tx = threadIdx.x; __shared__ double sdata[threadSize]; double res; res = 0.0; for(int i=0; i<n1; i+= threadSize) { res += A[tx + i + lda * blockIdx.y] * x[(tx + i)*incx]; } if(m > n1) { if( tx + n1 < m ) { res += A[tx + n1 + lda *blockIdx.y] * x[(tx + n1)*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(tx<32) { sdata[tx] += sdata[tx+32]; } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } } if( tx == 0 ) { if (blockIdx.y < n) { y[blockIdx.y*incy] = sdata[0] * alpha + beta * y[blockIdx.y*incy]; } } } extern "C" void magmablas_dgemvt_fermi( magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *y, magma_int_t incy) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes y = alpha A^t x on the GPU. M - (input) INTEGER. On entry, m specifies the number of rows of the matrix A. N - (input) INTEGER. On entry, n specifies the number of columns of the matrix A A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER. LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension m. Y - (output) DOUBLE PRECISION array of dimension n. On exit y = alpha A^t X. 
===================================================================== */ dim3 grid ( 1, n, 1); dim3 threads ( threadSize, 1, 1); dgemvt_kernel_fermi<<< grid, threads, 0, magma_stream >>>( m, n, alpha, ( m / threadSize) * threadSize, A, lda, x, incx, beta, y, incy); } extern "C" void magmablas_dgemv_fermi(char trans, magma_int_t m, magma_int_t n, double alpha, const double *A, magma_int_t lda, const double *x, magma_int_t incx, double beta, double *z, magma_int_t incz) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= This routine computes: 1) z = A x if trans == 'N' or 'n', alpha == 1, beta == 0, and incx == incz == 1 (using magmablas code) 2) z = alpha A^t x if trans == 'T' or 't', beta == 0, and incx == incz == 1 (using magmablas code) 3) z = alpha A^trans x + beta z otherwise, using CUBLAS. Arguments ========== TRANS - CHARACTER*1 On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' z := alpha*A *x + beta*z TRANS = 'T' or 't' z := alpha*A'*x + beta*z M - (input) INTEGER On entry, m specifies the number of rows of the matrix A. N - (input) INTEGER On entry, n specifies the number of columns of the matrix A ALPHA - DOUBLE REAL On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - (input) DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU. LDA - (input) INTEGER LDA specifies the leading dimension of A. X - (input) DOUBLE PRECISION array of dimension n if trans == 'n' m if trans == 't' INCX - (input) Specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Z - (output) DOUBLE PRECISION array of dimension m if trans == 'n' n if trans == 't' INCZ - (input) Specifies the increment for the elements of Z. INCZ must not be zero. Unchanged on exit. 
===================================================================== */ //if (incx == 1 && incz == 1) { if (trans == 'n' || trans == 'N') { if ( m >= 7000 && m <= 8000 ) cublasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); else magmablas_dgemvn_fermi(m, n, alpha, A, lda, x, incx, beta, z, incz); } else if (trans == 't' || trans == 'T') magmablas_dgemvt_fermi(m, n, alpha, A, lda, x, incx, beta, z, incz); else printf("trans = %c in sgemv_fermi is not available\n", trans); } // else // cublasDgemv(trans, m, n, alpha, A, lda, x, incx, beta, z, incz); } #undef dgemv_bs #undef threadSize
11771546e8ed8e5e548586d56441390c343314ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cuda_runtime.h> #include<cuda.h> #include<device_launch_parameters.h> #include "common.h" #define DTYPE float #define M 4096 #define N 4096 #define K 4096 #define MBLOCK 32 #define NBLOCK 32 #define Mtile 128 // This will actually be the loop step of `i` loop. #define Ntile 128 // This will actually be the loop step of `j` loop. using namespace std; __global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ // Since the actual computation tile size is greater than than the thread // block tile size, therefore we want to find out what size of the output tile // is a register calculating. // Now each thread will compute an output tile of size (Mchunk, Nchunk). constexpr int Mchunk = Mtile / MBLOCK; constexpr int Nchunk = Ntile / NBLOCK; // Instead find the iteration of the original loop nest that maps to this // thread block here. // It is more elegant to map the iterations instead of row or col. At the end // it doesn't matter becuase the iterations actually determine which row or // col is it. // In this particular launch setup with thread block sizes of (32,32) and each // thread calculating one outptut element, the globalthreadId.x and // globalthreadId.y is actually the iterations we are looking for. // The Outer loops iteration beginning that this thread block tile // is responsible for. int i_iter_tile_base = blockIdx.y * Mtile; int j_iter_tile_base = blockIdx.x * Ntile; // The Inner loop iteration beginning that this thread block tile is // responsible for. int i_iter_thread_base = threadIdx.y * Mchunk; int j_iter_thread_base = threadIdx.x * Nchunk; // The Global index start that this thread is responsible for. int i = i_iter_tile_base + i_iter_thread_base; int j = j_iter_tile_base + j_iter_thread_base; // Allocate a Ctile in registers of dimensions (Mchunk, Nchunk). 
// Dont know if this actually goes into the resgisters as register file cannot // be indexed. DTYPE Cout[Mchunk * Nchunk]; for(int i = 0; i < Mchunk; ++i){ for(int j = 0; j < Nchunk; ++j){ Cout[i * Nchunk + j] = 0.0f; } } // K dimension is sequential so this is not mapped to the gpu compute // heirarchy. for(int i_iter = i, ci = 0; i_iter < i + Mchunk; ++i_iter, ++ci){ for(int j_iter = j, cj = 0; j_iter < j + Nchunk; ++j_iter, ++cj){ for(int kk = 0; kk < k; ++kk){ if(i_iter < m && j_iter < n){ Cout[ci * Nchunk + cj] += a[i_iter * k + kk] * b[kk * n + j_iter]; } } } } // Write back the result to the output matrix. for(int ii = 0; ii < Mchunk; ++ii){ for(int jj = 0; jj < Nchunk; ++jj){ c[(i + ii) * n + (j + jj)] = Cout[ii * Nchunk + jj]; } } } void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ DTYPE temp = 0; for(int kk = 0; kk < k ; ++kk){ temp += a[i * k + kk] * b[kk * n + j]; } c[i * n + j] = temp; } } } bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4) return false; } } return true; } void initMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX); } } } void printMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ cout<<matrix[i * n + j]<<" "; } cout<<endl; } cout<<endl; } int main(){ DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res; int m ,n, k; m = M; n = N; k = K; h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE)); h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE)); h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE)); h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE)); check_cuda_error(hipMalloc(&d_a, m * k * sizeof(DTYPE))); check_cuda_error(hipMalloc(&d_b, k * n * sizeof(DTYPE))); 
check_cuda_error(hipMalloc(&d_c, m * n * sizeof(DTYPE))); initMatrix(h_a, m , k); initMatrix(h_b, k , n); initMatrix(h_c_gpu_res, m , n); check_cuda_error(hipMemcpy(d_a, h_a, m * k * sizeof(DTYPE), hipMemcpyHostToDevice)); check_cuda_error(hipMemcpy(d_b, h_b, k * n * sizeof(DTYPE), hipMemcpyHostToDevice)); dim3 block(NBLOCK, MBLOCK, 1); dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1); hipLaunchKernelGGL(( GEMM), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, m , n, k); check_cuda_error(hipPeekAtLastError()); check_cuda_error(hipDeviceSynchronize()); hipMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), hipMemcpyDeviceToHost); hostGEMM(h_a, h_b, h_c, m, n, k); cout<<compareGEMM(h_c, h_c_gpu_res, m, n)<<endl; //printMatrix(h_c, m, n); //cout<<"output gpu\n"; //printMatrix(h_c_gpu_res, m, n); free(h_a); free(h_b); free(h_c); free(h_c_gpu_res); // The Global index start that this thread is responsible for.hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
11771546e8ed8e5e548586d56441390c343314ec.cu
#include<iostream> #include<cuda_runtime.h> #include<cuda.h> #include<device_launch_parameters.h> #include "common.h" #define DTYPE float #define M 4096 #define N 4096 #define K 4096 #define MBLOCK 32 #define NBLOCK 32 #define Mtile 128 // This will actually be the loop step of `i` loop. #define Ntile 128 // This will actually be the loop step of `j` loop. using namespace std; __global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ // Since the actual computation tile size is greater than than the thread // block tile size, therefore we want to find out what size of the output tile // is a register calculating. // Now each thread will compute an output tile of size (Mchunk, Nchunk). constexpr int Mchunk = Mtile / MBLOCK; constexpr int Nchunk = Ntile / NBLOCK; // Instead find the iteration of the original loop nest that maps to this // thread block here. // It is more elegant to map the iterations instead of row or col. At the end // it doesn't matter becuase the iterations actually determine which row or // col is it. // In this particular launch setup with thread block sizes of (32,32) and each // thread calculating one outptut element, the globalthreadId.x and // globalthreadId.y is actually the iterations we are looking for. // The Outer loops iteration beginning that this thread block tile // is responsible for. int i_iter_tile_base = blockIdx.y * Mtile; int j_iter_tile_base = blockIdx.x * Ntile; // The Inner loop iteration beginning that this thread block tile is // responsible for. int i_iter_thread_base = threadIdx.y * Mchunk; int j_iter_thread_base = threadIdx.x * Nchunk; // The Global index start that this thread is responsible for. int i = i_iter_tile_base + i_iter_thread_base; int j = j_iter_tile_base + j_iter_thread_base; // Allocate a Ctile in registers of dimensions (Mchunk, Nchunk). // Dont know if this actually goes into the resgisters as register file cannot // be indexed. 
DTYPE Cout[Mchunk * Nchunk]; for(int i = 0; i < Mchunk; ++i){ for(int j = 0; j < Nchunk; ++j){ Cout[i * Nchunk + j] = 0.0f; } } // K dimension is sequential so this is not mapped to the gpu compute // heirarchy. for(int i_iter = i, ci = 0; i_iter < i + Mchunk; ++i_iter, ++ci){ for(int j_iter = j, cj = 0; j_iter < j + Nchunk; ++j_iter, ++cj){ for(int kk = 0; kk < k; ++kk){ if(i_iter < m && j_iter < n){ Cout[ci * Nchunk + cj] += a[i_iter * k + kk] * b[kk * n + j_iter]; } } } } // Write back the result to the output matrix. for(int ii = 0; ii < Mchunk; ++ii){ for(int jj = 0; jj < Nchunk; ++jj){ c[(i + ii) * n + (j + jj)] = Cout[ii * Nchunk + jj]; } } } void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ DTYPE temp = 0; for(int kk = 0; kk < k ; ++kk){ temp += a[i * k + kk] * b[kk * n + j]; } c[i * n + j] = temp; } } } bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j ){ if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4) return false; } } return true; } void initMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX); } } } void printMatrix(DTYPE * matrix, int m, int n){ for(int i = 0; i < m; ++i){ for(int j = 0; j < n; ++j){ cout<<matrix[i * n + j]<<" "; } cout<<endl; } cout<<endl; } int main(){ DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res; int m ,n, k; m = M; n = N; k = K; h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE)); h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE)); h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE)); h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE)); check_cuda_error(cudaMalloc(&d_a, m * k * sizeof(DTYPE))); check_cuda_error(cudaMalloc(&d_b, k * n * sizeof(DTYPE))); check_cuda_error(cudaMalloc(&d_c, m * n * sizeof(DTYPE))); initMatrix(h_a, m , k); initMatrix(h_b, k , n); 
initMatrix(h_c_gpu_res, m , n); check_cuda_error(cudaMemcpy(d_a, h_a, m * k * sizeof(DTYPE), cudaMemcpyHostToDevice)); check_cuda_error(cudaMemcpy(d_b, h_b, k * n * sizeof(DTYPE), cudaMemcpyHostToDevice)); dim3 block(NBLOCK, MBLOCK, 1); dim3 grid((n + Ntile - 1) / Ntile, (m + Mtile - 1) / Mtile, 1); GEMM<<<grid, block>>>(d_a, d_b, d_c, m , n, k); check_cuda_error(cudaPeekAtLastError()); check_cuda_error(cudaDeviceSynchronize()); cudaMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), cudaMemcpyDeviceToHost); hostGEMM(h_a, h_b, h_c, m, n, k); cout<<compareGEMM(h_c, h_c_gpu_res, m, n)<<endl; //printMatrix(h_c, m, n); //cout<<"output gpu\n"; //printMatrix(h_c_gpu_res, m, n); free(h_a); free(h_b); free(h_c); free(h_c_gpu_res); // The Global index start that this thread is responsible for.cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
72ea47e0c88decc6269405ffc8995cb9c9e6723d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/proto/caffe.pb.h" #include "caffe/layers/srnnformat_layer.hpp" namespace caffe { template<typename Dtype> __global__ void format_gpu(const Dtype* input, Dtype* output, int num, int channels, int height, int width, int N, bool gen_indicator, Dtype* indicator) { CUDA_KERNEL_LOOP(index, num * channels * height * width) { int n = index / (channels * height * width); int c = (index % (channels * height * width)) / (height * width); int h = (index % (height * width)) / width; int w = index % width; int top_index, bottom_index, T_idx = w, N_idx = n * height + h; if (gen_indicator && c == 0) { indicator [T_idx * N + N_idx] = int (T_idx != 0); } bottom_index = n * channels * height * width + c * height * width + h * width + w; top_index = T_idx * N * channels + N_idx * channels + c; output[top_index] = input[bottom_index]; } } template<typename Dtype> __global__ void unformat_gpu(const Dtype* input, Dtype* output, int num, int channels, int height, int width, int N) { CUDA_KERNEL_LOOP(index, num * channels * height * width) { int n = index / (channels * height * width); int c = (index % (channels * height * width)) / (height * width); int h = (index % (height * width)) / width; int w = index % width; int top_index, bottom_index, T_idx = w, N_idx = n * height + h; top_index = n * channels * height * width + c * height * width + h * width + w; bottom_index = T_idx * N * channels + N_idx * channels + c; output[top_index] = input[bottom_index]; } } template<typename Dtype> void SRNNFormatLayer< Dtype >::Forward_gpu(const vector< Blob< Dtype >* >& bottom, const vector< Blob< Dtype >* >& top) { const int count = bottom[0]->count(); if ( operation_ == SRNNFormatParameter_Operation_FORMAT ) { /* NOLINT_NEXT_LINE(whitespace/operators) */ hipLaunchKernelGGL(( format_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 
bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), num_, channels_, height_, width_, N_, true, top[1]->mutable_gpu_data()); } else { /* NOLINT_NEXT_LINE(whitespace/operators) */ hipLaunchKernelGGL(( unformat_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), num_, channels_, height_, width_, N_); } CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void SRNNFormatLayer< Dtype >::Backward_gpu(const vector< Blob<Dtype>* >& top, const vector<bool>& propagate_down, const vector< Blob<Dtype>* >& bottom) { if (!propagate_down[0]) return; const int count = bottom[0]->count(); if ( operation_ == SRNNFormatParameter_Operation_UNFORMAT ) { if (propagate_down[1]) LOG(FATAL) << this->type() << " Layer cannot backpropagate to the stream size input."; Dtype* nullindicator = NULL; /* NOLINT_NEXT_LINE(whitespace/operators) */ hipLaunchKernelGGL(( format_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), num_, channels_, height_, width_, N_, false, nullindicator); } else { /* NOLINT_NEXT_LINE(whitespace/operators) */ hipLaunchKernelGGL(( unformat_gpu), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), num_, channels_, height_, width_, N_); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SRNNFormatLayer); } // namespace caffe
72ea47e0c88decc6269405ffc8995cb9c9e6723d.cu
#include <vector> #include "caffe/proto/caffe.pb.h" #include "caffe/layers/srnnformat_layer.hpp" namespace caffe { template<typename Dtype> __global__ void format_gpu(const Dtype* input, Dtype* output, int num, int channels, int height, int width, int N, bool gen_indicator, Dtype* indicator) { CUDA_KERNEL_LOOP(index, num * channels * height * width) { int n = index / (channels * height * width); int c = (index % (channels * height * width)) / (height * width); int h = (index % (height * width)) / width; int w = index % width; int top_index, bottom_index, T_idx = w, N_idx = n * height + h; if (gen_indicator && c == 0) { indicator [T_idx * N + N_idx] = int (T_idx != 0); } bottom_index = n * channels * height * width + c * height * width + h * width + w; top_index = T_idx * N * channels + N_idx * channels + c; output[top_index] = input[bottom_index]; } } template<typename Dtype> __global__ void unformat_gpu(const Dtype* input, Dtype* output, int num, int channels, int height, int width, int N) { CUDA_KERNEL_LOOP(index, num * channels * height * width) { int n = index / (channels * height * width); int c = (index % (channels * height * width)) / (height * width); int h = (index % (height * width)) / width; int w = index % width; int top_index, bottom_index, T_idx = w, N_idx = n * height + h; top_index = n * channels * height * width + c * height * width + h * width + w; bottom_index = T_idx * N * channels + N_idx * channels + c; output[top_index] = input[bottom_index]; } } template<typename Dtype> void SRNNFormatLayer< Dtype >::Forward_gpu(const vector< Blob< Dtype >* >& bottom, const vector< Blob< Dtype >* >& top) { const int count = bottom[0]->count(); if ( operation_ == SRNNFormatParameter_Operation_FORMAT ) { /* NOLINT_NEXT_LINE(whitespace/operators) */ format_gpu<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), num_, channels_, height_, width_, N_, true, top[1]->mutable_gpu_data()); } else { /* 
NOLINT_NEXT_LINE(whitespace/operators) */ unformat_gpu<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), num_, channels_, height_, width_, N_); } CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void SRNNFormatLayer< Dtype >::Backward_gpu(const vector< Blob<Dtype>* >& top, const vector<bool>& propagate_down, const vector< Blob<Dtype>* >& bottom) { if (!propagate_down[0]) return; const int count = bottom[0]->count(); if ( operation_ == SRNNFormatParameter_Operation_UNFORMAT ) { if (propagate_down[1]) LOG(FATAL) << this->type() << " Layer cannot backpropagate to the stream size input."; Dtype* nullindicator = NULL; /* NOLINT_NEXT_LINE(whitespace/operators) */ format_gpu<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), num_, channels_, height_, width_, N_, false, nullindicator); } else { /* NOLINT_NEXT_LINE(whitespace/operators) */ unformat_gpu<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), num_, channels_, height_, width_, N_); } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SRNNFormatLayer); } // namespace caffe
49ba2e4fcd9ba8916a4c2f51d54a9f4544a23cb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** ---------------------------------------------------------------------------* * @brief The cugraph Jaccard core functionality * * @file jaccard.cu * ---------------------------------------------------------------------------**/ #include "graph_utils.cuh" #include "cugraph.h" #include "rmm_utils.h" #include "utilities/error_utils.h" namespace cugraph { // Volume of neighboors (*weight_s) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_row_sum(IdxType n, IdxType *csrPtr, IdxType *csrInd, ValType *v, ValType *work) { IdxType row, start, end, length; ValType sum; for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) { start = csrPtr[row]; end = csrPtr[row + 1]; length = end - start; //compute row sums if (weighted) { sum = parallel_prefix_sum(length, csrInd + start, v); if (threadIdx.x == 0) work[row] = sum; } else { work[row] = (ValType) length; } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_is(IdxType n, IdxType *csrPtr, IdxType *csrInd, ValType *v, ValType *work, ValType *weight_i, ValType *weight_s) { IdxType i, j, 
row, col, Ni, Nj; IdxType ref, cur, ref_col, cur_col, match; ValType ref_val; for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) { for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1]; j += gridDim.y * blockDim.y) { col = csrInd[j]; //find which row has least elements (and call it reference row) Ni = csrPtr[row + 1] - csrPtr[row]; Nj = csrPtr[col + 1] - csrPtr[col]; ref = (Ni < Nj) ? row : col; cur = (Ni < Nj) ? col : row; //compute new sum weights weight_s[j] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; i += gridDim.x * blockDim.x) { match = -1; ref_col = csrInd[i]; if (weighted) { ref_val = v[ref_col]; } else { ref_val = 1.0; } //binary search (column indices are sorted within each row) IdxType left = csrPtr[cur]; IdxType right = csrPtr[cur + 1] - 1; while (left <= right) { IdxType middle = (left + right) >> 1; cur_col = csrInd[middle]; if (cur_col > ref_col) { right = middle - 1; } else if (cur_col < ref_col) { left = middle + 1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1) { atomicAdd(&weight_i[j], ref_val); } } } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // Using list of node pairs template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_is_pairs(IdxType num_pairs, IdxType *csrPtr, IdxType *csrInd, IdxType *first_pair, IdxType *second_pair, ValType *v, ValType *work, ValType *weight_i, ValType *weight_s) { IdxType i, idx, row, col, Ni, Nj; IdxType ref, cur, ref_col, cur_col, match; ValType ref_val; for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs; idx += gridDim.z * blockDim.z) { row = first_pair[idx]; col 
= second_pair[idx]; //find which row has least elements (and call it reference row) Ni = csrPtr[row + 1] - csrPtr[row]; Nj = csrPtr[col + 1] - csrPtr[col]; ref = (Ni < Nj) ? row : col; cur = (Ni < Nj) ? col : row; //compute new sum weights weight_s[idx] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; i += gridDim.x * blockDim.x) { match = -1; ref_col = csrInd[i]; if (weighted) { ref_val = v[ref_col]; } else { ref_val = 1.0; } //binary search (column indices are sorted within each row) IdxType left = csrPtr[cur]; IdxType right = csrPtr[cur + 1] - 1; while (left <= right) { IdxType middle = (left + right) >> 1; cur_col = csrInd[middle]; if (cur_col > ref_col) { right = middle - 1; } else if (cur_col < ref_col) { left = middle + 1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1) { atomicAdd(&weight_i[idx], ref_val); } } } } //Jaccard weights (*weight) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_jw(IdxType e, IdxType *csrPtr, IdxType *csrInd, ValType *weight_i, ValType *weight_s, ValType *weight_j) { IdxType j; ValType Wi, Ws, Wu; for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) { Wi = weight_i[j]; Ws = weight_s[j]; Wu = Ws - Wi; weight_j[j] = (Wi / Wu); } } template<bool weighted, typename IdxType, typename ValType> int jaccard(IdxType n, IdxType e, IdxType *csrPtr, IdxType *csrInd, ValType *weight_in, ValType *work, ValType *weight_i, ValType *weight_s, ValType *weight_j) { dim3 nthreads, nblocks; int y = 4; //setup launch configuration nthreads.x = 32; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS); nblocks.z = 1; //launch 
kernel hipLaunchKernelGGL(( jaccard_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work); hipDeviceSynchronize(); fill(e, weight_i, (ValType) 0.0); //setup launch configuration nthreads.x = 32 / y; nthreads.y = y; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1; //launch kernel hipLaunchKernelGGL(( jaccard_is<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work, weight_i, weight_s); //setup launch configuration nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; //launch kernel hipLaunchKernelGGL(( jaccard_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, e, csrPtr, csrInd, weight_i, weight_s, weight_j); return 0; } template<bool weighted, typename IdxType, typename ValType> int jaccard_pairs(IdxType n, IdxType num_pairs, IdxType *csrPtr, IdxType *csrInd, IdxType *first_pair, IdxType *second_pair, ValType *weight_in, ValType *work, ValType *weight_i, ValType *weight_s, ValType *weight_j) { dim3 nthreads, nblocks; int y = 4; //setup launch configuration nthreads.x = 32; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS); nblocks.z = 1; //launch kernel hipLaunchKernelGGL(( jaccard_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work); hipDeviceSynchronize(); fill(num_pairs, weight_i, (ValType) 0.0); //setup launch configuration nthreads.x = 32; nthreads.y = 1; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1; //launch kernel hipLaunchKernelGGL(( jaccard_is_pairs<weighted, IdxType, ValType>) , dim3(nblocks), 
dim3(nthreads), 0, 0, num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s); //setup launch configuration nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; //launch kernel hipLaunchKernelGGL(( jaccard_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j); return 0; } } // End cugraph namespace gdf_error gdf_jaccard(gdf_graph *graph, gdf_column *weights, gdf_column *result) { GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE((graph->adjList != nullptr) || (graph->edgeList != nullptr), GDF_INVALID_API_CALL); GDF_REQUIRE(result != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(result->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!result->valid, GDF_VALIDITY_UNSUPPORTED); GDF_TRY(gdf_add_adj_list(graph)); GDF_REQUIRE(graph->adjList != nullptr, GDF_INVALID_API_CALL); bool weighted = (weights != nullptr); gdf_dtype ValueType = result->dtype; gdf_dtype IndexType = graph->adjList->offsets->dtype; void *csrPtr = graph->adjList->offsets->data; void *csrInd = graph->adjList->indices->data; void *weight_i = nullptr; void *weight_s = nullptr; void *weight_j = result->data; void *work = nullptr; void *weight_in = nullptr; if (weighted) weight_in = weights->data; if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<true, int32_t, float>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && 
IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<false, int32_t, float>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<true, int32_t, double>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<false, int32_t, double>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<true, int64_t, float>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, 
(float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<false, int64_t, float>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<true, int64_t, double>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<false, int64_t, double>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } // Clean up temp arrays ALLOC_FREE_TRY(weight_i, nullptr); ALLOC_FREE_TRY(weight_s, nullptr); ALLOC_FREE_TRY(work, nullptr); return GDF_SUCCESS; } gdf_error gdf_jaccard_list(gdf_graph* graph, gdf_column* weights, gdf_column* first, gdf_column* second, gdf_column* result) { GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE((graph->adjList != nullptr) || (graph->edgeList != nullptr), GDF_INVALID_API_CALL); 
GDF_REQUIRE(result != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(result->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!result->valid, GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(first != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(first->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!first->valid, GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(second != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(second->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!second->valid, GDF_VALIDITY_UNSUPPORTED); GDF_TRY(gdf_add_adj_list(graph)); GDF_REQUIRE(graph->adjList != nullptr, GDF_INVALID_API_CALL); bool weighted = (weights != nullptr); gdf_dtype ValueType = result->dtype; gdf_dtype IndexType = graph->adjList->offsets->dtype; GDF_REQUIRE(first->dtype == IndexType, GDF_INVALID_API_CALL); GDF_REQUIRE(second->dtype == IndexType, GDF_INVALID_API_CALL); void *first_pair = first->data; void *second_pair = second->data; void *csrPtr = graph->adjList->offsets->data; void *csrInd = graph->adjList->indices->data; void *weight_i = nullptr; void *weight_s = nullptr; void *weight_j = result->data; void *work = nullptr; void *weight_in = nullptr; if (weighted) weight_in = weights->data; if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<true, int32_t, float>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); 
ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<false, int32_t, float>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<true, int32_t, double>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<false, int32_t, double>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<true, int64_t, float>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) 
second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<false, int64_t, float>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<true, int64_t, double>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<false, int64_t, double>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } // Clean up temp arrays ALLOC_FREE_TRY(weight_i, nullptr); ALLOC_FREE_TRY(weight_s, nullptr); ALLOC_FREE_TRY(work, nullptr); return GDF_SUCCESS; 
}
49ba2e4fcd9ba8916a4c2f51d54a9f4544a23cb6.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** ---------------------------------------------------------------------------* * @brief The cugraph Jaccard core functionality * * @file jaccard.cu * ---------------------------------------------------------------------------**/ #include "graph_utils.cuh" #include "cugraph.h" #include "rmm_utils.h" #include "utilities/error_utils.h" namespace cugraph { // Volume of neighboors (*weight_s) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_row_sum(IdxType n, IdxType *csrPtr, IdxType *csrInd, ValType *v, ValType *work) { IdxType row, start, end, length; ValType sum; for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) { start = csrPtr[row]; end = csrPtr[row + 1]; length = end - start; //compute row sums if (weighted) { sum = parallel_prefix_sum(length, csrInd + start, v); if (threadIdx.x == 0) work[row] = sum; } else { work[row] = (ValType) length; } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_is(IdxType n, IdxType *csrPtr, IdxType *csrInd, ValType *v, ValType *work, ValType *weight_i, ValType *weight_s) { IdxType i, j, row, col, Ni, Nj; IdxType ref, cur, ref_col, cur_col, match; ValType ref_val; for (row = 
threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) { for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1]; j += gridDim.y * blockDim.y) { col = csrInd[j]; //find which row has least elements (and call it reference row) Ni = csrPtr[row + 1] - csrPtr[row]; Nj = csrPtr[col + 1] - csrPtr[col]; ref = (Ni < Nj) ? row : col; cur = (Ni < Nj) ? col : row; //compute new sum weights weight_s[j] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; i += gridDim.x * blockDim.x) { match = -1; ref_col = csrInd[i]; if (weighted) { ref_val = v[ref_col]; } else { ref_val = 1.0; } //binary search (column indices are sorted within each row) IdxType left = csrPtr[cur]; IdxType right = csrPtr[cur + 1] - 1; while (left <= right) { IdxType middle = (left + right) >> 1; cur_col = csrInd[middle]; if (cur_col > ref_col) { right = middle - 1; } else if (cur_col < ref_col) { left = middle + 1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1) { atomicAdd(&weight_i[j], ref_val); } } } } } // Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) // Using list of node pairs template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_is_pairs(IdxType num_pairs, IdxType *csrPtr, IdxType *csrInd, IdxType *first_pair, IdxType *second_pair, ValType *v, ValType *work, ValType *weight_i, ValType *weight_s) { IdxType i, idx, row, col, Ni, Nj; IdxType ref, cur, ref_col, cur_col, match; ValType ref_val; for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs; idx += gridDim.z * blockDim.z) { row = first_pair[idx]; col = second_pair[idx]; //find which row has least elements (and call it reference row) Ni = 
csrPtr[row + 1] - csrPtr[row]; Nj = csrPtr[col + 1] - csrPtr[col]; ref = (Ni < Nj) ? row : col; cur = (Ni < Nj) ? col : row; //compute new sum weights weight_s[idx] = work[row] + work[col]; //compute new intersection weights //search for the element with the same column index in the reference row for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; i += gridDim.x * blockDim.x) { match = -1; ref_col = csrInd[i]; if (weighted) { ref_val = v[ref_col]; } else { ref_val = 1.0; } //binary search (column indices are sorted within each row) IdxType left = csrPtr[cur]; IdxType right = csrPtr[cur + 1] - 1; while (left <= right) { IdxType middle = (left + right) >> 1; cur_col = csrInd[middle]; if (cur_col > ref_col) { right = middle - 1; } else if (cur_col < ref_col) { left = middle + 1; } else { match = middle; break; } } //if the element with the same column index in the reference row has been found if (match != -1) { atomicAdd(&weight_i[idx], ref_val); } } } } //Jaccard weights (*weight) template<bool weighted, typename IdxType, typename ValType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) jaccard_jw(IdxType e, IdxType *csrPtr, IdxType *csrInd, ValType *weight_i, ValType *weight_s, ValType *weight_j) { IdxType j; ValType Wi, Ws, Wu; for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) { Wi = weight_i[j]; Ws = weight_s[j]; Wu = Ws - Wi; weight_j[j] = (Wi / Wu); } } template<bool weighted, typename IdxType, typename ValType> int jaccard(IdxType n, IdxType e, IdxType *csrPtr, IdxType *csrInd, ValType *weight_in, ValType *work, ValType *weight_i, ValType *weight_s, ValType *weight_j) { dim3 nthreads, nblocks; int y = 4; //setup launch configuration nthreads.x = 32; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS); nblocks.z = 1; //launch kernel jaccard_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n, csrPtr, 
csrInd, weight_in, work); cudaDeviceSynchronize(); fill(e, weight_i, (ValType) 0.0); //setup launch configuration nthreads.x = 32 / y; nthreads.y = y; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1; //launch kernel jaccard_is<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work, weight_i, weight_s); //setup launch configuration nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; //launch kernel jaccard_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(e, csrPtr, csrInd, weight_i, weight_s, weight_j); return 0; } template<bool weighted, typename IdxType, typename ValType> int jaccard_pairs(IdxType n, IdxType num_pairs, IdxType *csrPtr, IdxType *csrInd, IdxType *first_pair, IdxType *second_pair, ValType *weight_in, ValType *work, ValType *weight_i, ValType *weight_s, ValType *weight_j) { dim3 nthreads, nblocks; int y = 4; //setup launch configuration nthreads.x = 32; nthreads.y = y; nthreads.z = 1; nblocks.x = 1; nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS); nblocks.z = 1; //launch kernel jaccard_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work); cudaDeviceSynchronize(); fill(num_pairs, weight_i, (ValType) 0.0); //setup launch configuration nthreads.x = 32; nthreads.y = 1; nthreads.z = 8; nblocks.x = 1; nblocks.y = 1; nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1; //launch kernel jaccard_is_pairs<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s); //setup launch configuration nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((num_pairs + 
nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; //launch kernel jaccard_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j); return 0; } } // End cugraph namespace gdf_error gdf_jaccard(gdf_graph *graph, gdf_column *weights, gdf_column *result) { GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE((graph->adjList != nullptr) || (graph->edgeList != nullptr), GDF_INVALID_API_CALL); GDF_REQUIRE(result != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(result->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!result->valid, GDF_VALIDITY_UNSUPPORTED); GDF_TRY(gdf_add_adj_list(graph)); GDF_REQUIRE(graph->adjList != nullptr, GDF_INVALID_API_CALL); bool weighted = (weights != nullptr); gdf_dtype ValueType = result->dtype; gdf_dtype IndexType = graph->adjList->offsets->dtype; void *csrPtr = graph->adjList->offsets->data; void *csrInd = graph->adjList->indices->data; void *weight_i = nullptr; void *weight_s = nullptr; void *weight_j = result->data; void *work = nullptr; void *weight_in = nullptr; if (weighted) weight_in = weights->data; if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<true, int32_t, float>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); 
cugraph::jaccard<false, int32_t, float>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<true, int32_t, double>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<false, int32_t, double>(n, e, (int32_t*) csrPtr, (int32_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<true, int64_t, float>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * e, nullptr); 
ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard<false, int64_t, float>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<true, int64_t, double>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t e = graph->adjList->indices->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * e, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard<false, int64_t, double>(n, e, (int64_t*) csrPtr, (int64_t*) csrInd, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } // Clean up temp arrays ALLOC_FREE_TRY(weight_i, nullptr); ALLOC_FREE_TRY(weight_s, nullptr); ALLOC_FREE_TRY(work, nullptr); return GDF_SUCCESS; } gdf_error gdf_jaccard_list(gdf_graph* graph, gdf_column* weights, gdf_column* first, gdf_column* second, gdf_column* result) { GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE((graph->adjList != nullptr) || (graph->edgeList != nullptr), GDF_INVALID_API_CALL); GDF_REQUIRE(result != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(result->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!result->valid, GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(first != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(first->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!first->valid, 
GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(second != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(second->data != nullptr, GDF_INVALID_API_CALL); GDF_REQUIRE(!second->valid, GDF_VALIDITY_UNSUPPORTED); GDF_TRY(gdf_add_adj_list(graph)); GDF_REQUIRE(graph->adjList != nullptr, GDF_INVALID_API_CALL); bool weighted = (weights != nullptr); gdf_dtype ValueType = result->dtype; gdf_dtype IndexType = graph->adjList->offsets->dtype; GDF_REQUIRE(first->dtype == IndexType, GDF_INVALID_API_CALL); GDF_REQUIRE(second->dtype == IndexType, GDF_INVALID_API_CALL); void *first_pair = first->data; void *second_pair = second->data; void *csrPtr = graph->adjList->offsets->data; void *csrInd = graph->adjList->indices->data; void *weight_i = nullptr; void *weight_s = nullptr; void *weight_j = result->data; void *work = nullptr; void *weight_in = nullptr; if (weighted) weight_in = weights->data; if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<true, int32_t, float>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<false, int32_t, float>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) 
weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<true, int32_t, double>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) { int32_t n = graph->adjList->offsets->size - 1; int32_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<false, int32_t, double>(n, num_pairs, (int32_t*) csrPtr, (int32_t*) csrInd, (int32_t*) first_pair, (int32_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<true, int64_t, float>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(float) * num_pairs, nullptr); 
ALLOC_MANAGED_TRY(&weight_s, sizeof(float) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(float) * n, nullptr); cugraph::jaccard_pairs<false, int64_t, float>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (float*) weight_in, (float*) work, (float*) weight_i, (float*) weight_s, (float*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<true, int64_t, double>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) { int64_t n = graph->adjList->offsets->size - 1; int64_t num_pairs = first->size; ALLOC_MANAGED_TRY(&weight_i, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&weight_s, sizeof(double) * num_pairs, nullptr); ALLOC_MANAGED_TRY(&work, sizeof(double) * n, nullptr); cugraph::jaccard_pairs<false, int64_t, double>(n, num_pairs, (int64_t*) csrPtr, (int64_t*) csrInd, (int64_t*) first_pair, (int64_t*) second_pair, (double*) weight_in, (double*) work, (double*) weight_i, (double*) weight_s, (double*) weight_j); } // Clean up temp arrays ALLOC_FREE_TRY(weight_i, nullptr); ALLOC_FREE_TRY(weight_s, nullptr); ALLOC_FREE_TRY(work, nullptr); return GDF_SUCCESS; }
400e3d8b20efd3a3522c9a07bf94ae08447be24b.hip
// !!! This is a file automatically generated by hipify!!! #include <hiprand/hiprand.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <time.h> /*************************/ /* CURAND ERROR CHECKING */ /*************************/ static const char *_curandReturnErrorString(hiprandStatus_t error) { switch (error) { case 0: return "No errors"; case 100: return "Header file and linked library version do not match"; case 101: return "Generator not initialized"; case 102: return "Memory allocation failed"; case 103: return "Generator is wrong type"; case 104: return "Argument out of range"; case 105: return "Length requested is not a multiple of dimension"; case 106: return "GPU does not have double precision required by MRG32k3a"; case 201: return "Kernel launch failure"; case 202: return "Preexisting failure on library entry"; case 203: return "Initialization of CUDA failed"; case 204: return "Architecture mismatch, GPU does not support requested feature"; case 999: return "Internal library error"; } return "<unknown>"; } inline void __curandCHECK(hiprandStatus_t err, const char *file, const int line) { if (HIPRAND_STATUS_SUCCESS != err) { fprintf(stderr, "CURAND error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \ _curandReturnErrorString(err)); \ assert(0); \ } } extern "C" void curandCHECK(hiprandStatus_t err) { __curandCHECK(err, __FILE__, __LINE__); } /*********************************************************/ /* FUNCTOR TO CHECK IF A POINT IS INSIDE THE UNIT CIRCLE */ /*********************************************************/ struct isInsideCircle { __device__ unsigned int operator() (float2 p) const { return (sqrtf(p.x * p.x + p.y * p.y) < 1.0f) ? 
1 : 0; } }; /********/ /* MAIN */ /********/ int main() { // --- Number of integration points const int N = 100000; thrust::device_vector<float2> d_p(N); hiprandGenerator_t rng; // --- Set the type of random number generator curandCHECK(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_MT19937)); // --- Set the seed curandCHECK(hiprandSetPseudoRandomGeneratorSeed(rng, time(NULL))); // --- Generate N numbers in 2 dimensions curandCHECK(hiprandGenerateUniform(rng, (float *)thrust::raw_pointer_cast(&d_p[0]), 2 * N)); // --- Count the points falling inside the unit circle unsigned int total = thrust::transform_reduce(d_p.begin(), d_p.end(), isInsideCircle(), 0, thrust::plus<unsigned int>()); printf("The integral is %f\n", 4.0f * (float)total / (float)N); // --- Destroy the random number generator curandCHECK(hiprandDestroyGenerator(rng)); return 0; }
400e3d8b20efd3a3522c9a07bf94ae08447be24b.cu
#include <curand.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <time.h> /*************************/ /* CURAND ERROR CHECKING */ /*************************/ static const char *_curandReturnErrorString(curandStatus_t error) { switch (error) { case 0: return "No errors"; case 100: return "Header file and linked library version do not match"; case 101: return "Generator not initialized"; case 102: return "Memory allocation failed"; case 103: return "Generator is wrong type"; case 104: return "Argument out of range"; case 105: return "Length requested is not a multiple of dimension"; case 106: return "GPU does not have double precision required by MRG32k3a"; case 201: return "Kernel launch failure"; case 202: return "Preexisting failure on library entry"; case 203: return "Initialization of CUDA failed"; case 204: return "Architecture mismatch, GPU does not support requested feature"; case 999: return "Internal library error"; } return "<unknown>"; } inline void __curandCHECK(curandStatus_t err, const char *file, const int line) { if (CURAND_STATUS_SUCCESS != err) { fprintf(stderr, "CURAND error in file '%s', line %d, error: %s \nterminating!\n", __FILE__, __LINE__, \ _curandReturnErrorString(err)); \ assert(0); \ } } extern "C" void curandCHECK(curandStatus_t err) { __curandCHECK(err, __FILE__, __LINE__); } /*********************************************************/ /* FUNCTOR TO CHECK IF A POINT IS INSIDE THE UNIT CIRCLE */ /*********************************************************/ struct isInsideCircle { __device__ unsigned int operator() (float2 p) const { return (sqrtf(p.x * p.x + p.y * p.y) < 1.0f) ? 
1 : 0; } }; /********/ /* MAIN */ /********/ int main() { // --- Number of integration points const int N = 100000; thrust::device_vector<float2> d_p(N); curandGenerator_t rng; // --- Set the type of random number generator curandCHECK(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_MT19937)); // --- Set the seed curandCHECK(curandSetPseudoRandomGeneratorSeed(rng, time(NULL))); // --- Generate N numbers in 2 dimensions curandCHECK(curandGenerateUniform(rng, (float *)thrust::raw_pointer_cast(&d_p[0]), 2 * N)); // --- Count the points falling inside the unit circle unsigned int total = thrust::transform_reduce(d_p.begin(), d_p.end(), isInsideCircle(), 0, thrust::plus<unsigned int>()); printf("The integral is %f\n", 4.0f * (float)total / (float)N); // --- Destroy the random number generator curandCHECK(curandDestroyGenerator(rng)); return 0; }
edf9db4fe622ecb47196f71981b8b5e0ee1b3855.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "device_helpers_hip.cuh" namespace xgboost { // the handler to call instead of hipSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_{access} {} public: Permissions() : access_{GPUAccess::kNone} {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = ::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = ::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : proper_size_{0}, device_{-1}, start_{0}, perm_d_{false}, cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const T* begin) { // 
TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_.data().get(), begin + start_, data_.size() * sizeof(T), hipMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpyAsync(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), hipMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); int device_index = vec_->distribution_.devices_.Index(device_); start_ = vec_->distribution_.ShardStart(new_size, device_index); proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), hipMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(hipSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } T* Raw() { return data_.data().get(); } size_t Start() const { return start_; } size_t DataSize() const { return data_.size(); } Permissions& Perm() { return perm_d_; } Permissions const& Perm() const { return perm_d_; } private: int device_; dh::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_.at(i)); }); } // Initializer 
can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_.DeviceId(i)); }); } size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_.at(distribution_.devices_.Index(device)).Raw(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Raw(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_.at(devices.Index(device)).Raw(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); using SpanInd = typename common::Span<const T>::index_type; return {shards_.at(devices.Index(device)).Raw(), static_cast<SpanInd>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).DataSize(); } size_t DeviceStart(int device) { 
CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Start(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), hipMemcpyDeviceToHost)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), hipMemcpyHostToDevice)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { // NOLINT if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Shard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_.at(i)); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Shard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty()); distribution_ = distribution; InitShards(); } void Shard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Shard(GPUDistribution::Block(new_devices)); } void Reshard(const GPUDistribution &distribution) { if (distribution_ == distribution) { return; } LazySyncHost(GPUAccess::kWrite); distribution_ = distribution; shards_.clear(); InitShards(); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); 
data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if (perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Perm().DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_.at(devices.Index(device)).LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_.at(devices.Index(device)).Perm().CanAccess(access); } private: std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { 
impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == &other) { return *this; } std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_)); delete impl_; impl_ = newImpl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // 
NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template <typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); } template <typename T> void HostDeviceVector<T>::Shard(GPUSet new_devices) const { impl_->Shard(new_devices); } template <typename T> void HostDeviceVector<T>::Shard(const GPUDistribution &distribution) const { impl_->Shard(distribution); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution &distribution) { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; 
template class HostDeviceVector<size_t>; } // namespace xgboost
edf9db4fe622ecb47196f71981b8b5e0ee1b3855.cu
/*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "./device_helpers.cuh" namespace xgboost { // the handler to call instead of cudaSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_{access} {} public: Permissions() : access_{GPUAccess::kNone} {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = std::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = std::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : proper_size_{0}, device_{-1}, start_{0}, perm_d_{false}, cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const T* begin) { // TODO(canonizer): avoid full copy of host data 
LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), begin + start_, data_.size() * sizeof(T), cudaMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), cudaMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); int device_index = vec_->distribution_.devices_.Index(device_); start_ = vec_->distribution_.ShardStart(new_size, device_index); proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), cudaMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(cudaSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } T* Raw() { return data_.data().get(); } size_t Start() const { return start_; } size_t DataSize() const { return data_.size(); } Permissions& Perm() { return perm_d_; } Permissions const& Perm() const { return perm_d_; } private: int device_; dh::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_.at(i)); }); } // 
Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_.DeviceId(i)); }); } size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_.at(distribution_.devices_.Index(device)).Raw(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Raw(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_.at(devices.Index(device)).Raw(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); using SpanInd = typename common::Span<const T>::index_type; return {shards_.at(devices.Index(device)).Raw(), static_cast<SpanInd>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).DataSize(); } size_t DeviceStart(int device) { 
CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Start(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), cudaMemcpyDeviceToHost)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), cudaMemcpyHostToDevice)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { // NOLINT if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Shard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_.at(i)); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Shard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty()); distribution_ = distribution; InitShards(); } void Shard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Shard(GPUDistribution::Block(new_devices)); } void Reshard(const GPUDistribution &distribution) { if (distribution_ == distribution) { return; } LazySyncHost(GPUAccess::kWrite); distribution_ = distribution; shards_.clear(); InitShards(); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); 
data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if (perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Perm().DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_.at(devices.Index(device)).LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_.at(devices.Index(device)).Perm().CanAccess(access); } private: std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { 
impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == &other) { return *this; } std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_)); delete impl_; impl_ = newImpl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // 
NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template <typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); } template <typename T> void HostDeviceVector<T>::Shard(GPUSet new_devices) const { impl_->Shard(new_devices); } template <typename T> void HostDeviceVector<T>::Shard(const GPUDistribution &distribution) const { impl_->Shard(distribution); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution &distribution) { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; 
template class HostDeviceVector<size_t>; } // namespace xgboost
087793a55e3bca67706b0a9be8998f60e1fa8dad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "group_points_gpu.h" __global__ void group_points_grad_kernel_stack(int B, int M, int C, int N, int nsample, const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :return: // grad_features: (N1 + N2 ..., C) gradient of the features int index = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = index % nsample; int C_idx = (index / nsample) % C; int pt_idx = (index / nsample / C); if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; for (int k = 1; k < B; k++){ if (pt_idx < pt_cnt) break; pt_cnt += idx_batch_cnt[k]; bs_idx = k; } int features_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx; idx += pt_idx * nsample + sample_idx; grad_features += (features_batch_start_idx + idx[0]) * C + C_idx; atomicAdd(grad_features, grad_out[0]); } void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: 
(batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :return: // grad_features: (N1 + N2 ..., C) gradient of the features hipError_t err; // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( group_points_grad_kernel_stack), dim3(blocks), dim3(threads), 0, 0, B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void group_points_kernel_stack(int B, int M, int C, int nsample, const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { // :param features: (N1 + N2 ..., C) tensor of features to group // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with // :return: // output: (M1 + M2, C, nsample) tensor int index = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = index % nsample; int C_idx = (index / nsample) % C; int pt_idx = (index / nsample / C); if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; for (int k = 1; k < B; k++){ if (pt_idx < pt_cnt) break; pt_cnt += idx_batch_cnt[k]; bs_idx = k; } int features_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; features += features_batch_start_idx * C; idx += pt_idx * nsample + sample_idx; int in_idx = idx[0] * C + C_idx; int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx; out[out_idx] = features[in_idx]; } void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { // :param features: (N1 + N2 ..., C) tensor of features to group // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with // :return: // output: (M1 + M2, C, nsample) tensor hipError_t err; dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( group_points_kernel_stack), dim3(blocks), dim3(threads), 0, 0, B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
087793a55e3bca67706b0a9be8998f60e1fa8dad.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "group_points_gpu.h" __global__ void group_points_grad_kernel_stack(int B, int M, int C, int N, int nsample, const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :return: // grad_features: (N1 + N2 ..., C) gradient of the features int index = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = index % nsample; int C_idx = (index / nsample) % C; int pt_idx = (index / nsample / C); if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; for (int k = 1; k < B; k++){ if (pt_idx < pt_cnt) break; pt_cnt += idx_batch_cnt[k]; bs_idx = k; } int features_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx; idx += pt_idx * nsample + sample_idx; grad_features += (features_batch_start_idx + idx[0]) * C + C_idx; atomicAdd(grad_features, grad_out[0]); } void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :return: // grad_features: (N1 + N2 ..., C) gradient of the features cudaError_t err; // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); group_points_grad_kernel_stack<<<blocks, threads>>>(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void group_points_kernel_stack(int B, int M, int C, int nsample, const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { // :param features: (N1 + N2 ..., C) tensor of features to group // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with // :return: // output: (M1 + M2, C, nsample) tensor int index = blockIdx.x * blockDim.x + threadIdx.x; int sample_idx = index % nsample; int C_idx = (index / nsample) % C; int pt_idx = (index / nsample / C); if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; for (int k = 1; k < B; k++){ if (pt_idx < pt_cnt) break; pt_cnt += idx_batch_cnt[k]; bs_idx = k; } int features_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; features += features_batch_start_idx * C; idx += pt_idx * nsample + sample_idx; int in_idx = idx[0] * C + C_idx; int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx; out[out_idx] = features[in_idx]; } void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { // :param features: (N1 + N2 ..., C) tensor of features to group // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with // :return: // output: (M1 + M2, C, nsample) tensor cudaError_t err; dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); group_points_kernel_stack<<<blocks, threads>>>(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
2c3666abb7974f8cc44a7917c2648f051d2c6f2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_post_pre_advec_x; int xdim0_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim0_advec_mom_kernel_post_pre_advec_x; int ydim0_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim1_advec_mom_kernel_post_pre_advec_x; int xdim1_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim1_advec_mom_kernel_post_pre_advec_x; int ydim1_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim2_advec_mom_kernel_post_pre_advec_x; int xdim2_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim2_advec_mom_kernel_post_pre_advec_x; int ydim2_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim3_advec_mom_kernel_post_pre_advec_x; int xdim3_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim3_advec_mom_kernel_post_pre_advec_x; int ydim3_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim4_advec_mom_kernel_post_pre_advec_x; int xdim4_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim4_advec_mom_kernel_post_pre_advec_x; int ydim4_advec_mom_kernel_post_pre_advec_x_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_post_pre_advec_x*(y)+xdim0_advec_mom_kernel_post_pre_advec_x*ydim0_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_post_pre_advec_x*(y)+xdim1_advec_mom_kernel_post_pre_advec_x*ydim1_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_post_pre_advec_x*(y)+xdim2_advec_mom_kernel_post_pre_advec_x*ydim2_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_post_pre_advec_x*(y)+xdim3_advec_mom_kernel_post_pre_advec_x*ydim3_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC4(x,y,z) 
(x+xdim4_advec_mom_kernel_post_pre_advec_x*(y)+xdim4_advec_mom_kernel_post_pre_advec_x*ydim4_advec_mom_kernel_post_pre_advec_x*(z)) //user function __device__ inline void advec_mom_kernel_post_pre_advec_x_gpu( double *node_mass_post, const double *post_vol, const double *density1, double *node_mass_pre, const double *node_flux) { node_mass_post[OPS_ACC0(0,0,0)] = 0.125 * ( density1[OPS_ACC2(0,-1,0)] * post_vol[OPS_ACC1(0,-1,0)] + density1[OPS_ACC2(0,0,0)] * post_vol[OPS_ACC1(0,0,0)] + density1[OPS_ACC2(-1,-1,0)] * post_vol[OPS_ACC1(-1,-1,0)] + density1[OPS_ACC2(-1,0,0)] * post_vol[OPS_ACC1(-1,0,0)] + density1[OPS_ACC2(0,-1,-1)] * post_vol[OPS_ACC1(0,-1,-1)] + density1[OPS_ACC2(0,0,-1)] * post_vol[OPS_ACC1(0,0,-1)] + density1[OPS_ACC2(-1,-1,-1)] * post_vol[OPS_ACC1(-1,-1,-1)] + density1[OPS_ACC2(-1,0,-1)] * post_vol[OPS_ACC1(-1,0,-1)] ); node_mass_pre[OPS_ACC3(0,0,0)] = node_mass_post[OPS_ACC0(0,0,0)] - node_flux[OPS_ACC4(-1,0,0)] + node_flux[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_post_pre_advec_x( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim0_advec_mom_kernel_post_pre_advec_x * ydim0_advec_mom_kernel_post_pre_advec_x; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim1_advec_mom_kernel_post_pre_advec_x * ydim1_advec_mom_kernel_post_pre_advec_x; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim2_advec_mom_kernel_post_pre_advec_x * ydim2_advec_mom_kernel_post_pre_advec_x; arg3 += idx_x * 1*1 + idx_y * 
1*1 * xdim3_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim3_advec_mom_kernel_post_pre_advec_x * ydim3_advec_mom_kernel_post_pre_advec_x; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim4_advec_mom_kernel_post_pre_advec_x * ydim4_advec_mom_kernel_post_pre_advec_x; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_post_pre_advec_x_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_post_pre_advec_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,127)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(127,"advec_mom_kernel_post_pre_advec_x"); OPS_kernels[127].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } 
#else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_post_pre_advec_x_h || ydim0 != ydim0_advec_mom_kernel_post_pre_advec_x_h || xdim1 != xdim1_advec_mom_kernel_post_pre_advec_x_h || ydim1 != ydim1_advec_mom_kernel_post_pre_advec_x_h || xdim2 != xdim2_advec_mom_kernel_post_pre_advec_x_h || ydim2 != ydim2_advec_mom_kernel_post_pre_advec_x_h || xdim3 != xdim3_advec_mom_kernel_post_pre_advec_x_h || ydim3 != ydim3_advec_mom_kernel_post_pre_advec_x_h || xdim4 != xdim4_advec_mom_kernel_post_pre_advec_x_h || ydim4 != ydim4_advec_mom_kernel_post_pre_advec_x_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel_post_pre_advec_x, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_post_pre_advec_x_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel_post_pre_advec_x, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_post_pre_advec_x_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel_post_pre_advec_x, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_post_pre_advec_x_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel_post_pre_advec_x, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_post_pre_advec_x_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel_post_pre_advec_x, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_post_pre_advec_x_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel_post_pre_advec_x, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_post_pre_advec_x_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel_post_pre_advec_x, &xdim3, sizeof(int) ); 
xdim3_advec_mom_kernel_post_pre_advec_x_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel_post_pre_advec_x, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_post_pre_advec_x_h = ydim3; hipMemcpyToSymbol( xdim4_advec_mom_kernel_post_pre_advec_x, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_post_pre_advec_x_h = xdim4; hipMemcpyToSymbol( ydim4_advec_mom_kernel_post_pre_advec_x, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_post_pre_advec_x_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char 
*)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[127].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_post_pre_advec_x), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[127].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[127].mpi_time += t2-t1; OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void 
ops_par_loop_advec_mom_kernel_post_pre_advec_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 127; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 127; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_post_pre_advec_x_execute; if (OPS_diags > 1) { ops_timing_realloc(127,"advec_mom_kernel_post_pre_advec_x"); } ops_enqueue_kernel(desc); } #endif
2c3666abb7974f8cc44a7917c2648f051d2c6f2b.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_post_pre_advec_x; int xdim0_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim0_advec_mom_kernel_post_pre_advec_x; int ydim0_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim1_advec_mom_kernel_post_pre_advec_x; int xdim1_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim1_advec_mom_kernel_post_pre_advec_x; int ydim1_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim2_advec_mom_kernel_post_pre_advec_x; int xdim2_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim2_advec_mom_kernel_post_pre_advec_x; int ydim2_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim3_advec_mom_kernel_post_pre_advec_x; int xdim3_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim3_advec_mom_kernel_post_pre_advec_x; int ydim3_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int xdim4_advec_mom_kernel_post_pre_advec_x; int xdim4_advec_mom_kernel_post_pre_advec_x_h = -1; __constant__ int ydim4_advec_mom_kernel_post_pre_advec_x; int ydim4_advec_mom_kernel_post_pre_advec_x_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_post_pre_advec_x*(y)+xdim0_advec_mom_kernel_post_pre_advec_x*ydim0_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_post_pre_advec_x*(y)+xdim1_advec_mom_kernel_post_pre_advec_x*ydim1_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_post_pre_advec_x*(y)+xdim2_advec_mom_kernel_post_pre_advec_x*ydim2_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_post_pre_advec_x*(y)+xdim3_advec_mom_kernel_post_pre_advec_x*ydim3_advec_mom_kernel_post_pre_advec_x*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel_post_pre_advec_x*(y)+xdim4_advec_mom_kernel_post_pre_advec_x*ydim4_advec_mom_kernel_post_pre_advec_x*(z)) 
//user function __device__ inline void advec_mom_kernel_post_pre_advec_x_gpu( double *node_mass_post, const double *post_vol, const double *density1, double *node_mass_pre, const double *node_flux) { node_mass_post[OPS_ACC0(0,0,0)] = 0.125 * ( density1[OPS_ACC2(0,-1,0)] * post_vol[OPS_ACC1(0,-1,0)] + density1[OPS_ACC2(0,0,0)] * post_vol[OPS_ACC1(0,0,0)] + density1[OPS_ACC2(-1,-1,0)] * post_vol[OPS_ACC1(-1,-1,0)] + density1[OPS_ACC2(-1,0,0)] * post_vol[OPS_ACC1(-1,0,0)] + density1[OPS_ACC2(0,-1,-1)] * post_vol[OPS_ACC1(0,-1,-1)] + density1[OPS_ACC2(0,0,-1)] * post_vol[OPS_ACC1(0,0,-1)] + density1[OPS_ACC2(-1,-1,-1)] * post_vol[OPS_ACC1(-1,-1,-1)] + density1[OPS_ACC2(-1,0,-1)] * post_vol[OPS_ACC1(-1,0,-1)] ); node_mass_pre[OPS_ACC3(0,0,0)] = node_mass_post[OPS_ACC0(0,0,0)] - node_flux[OPS_ACC4(-1,0,0)] + node_flux[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_post_pre_advec_x( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim0_advec_mom_kernel_post_pre_advec_x * ydim0_advec_mom_kernel_post_pre_advec_x; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim1_advec_mom_kernel_post_pre_advec_x * ydim1_advec_mom_kernel_post_pre_advec_x; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim2_advec_mom_kernel_post_pre_advec_x * ydim2_advec_mom_kernel_post_pre_advec_x; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim3_advec_mom_kernel_post_pre_advec_x * 
ydim3_advec_mom_kernel_post_pre_advec_x; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel_post_pre_advec_x + idx_z * 1*1 * xdim4_advec_mom_kernel_post_pre_advec_x * ydim4_advec_mom_kernel_post_pre_advec_x; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_post_pre_advec_x_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_post_pre_advec_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,127)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(127,"advec_mom_kernel_post_pre_advec_x"); OPS_kernels[127].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = 
MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_post_pre_advec_x_h || ydim0 != ydim0_advec_mom_kernel_post_pre_advec_x_h || xdim1 != xdim1_advec_mom_kernel_post_pre_advec_x_h || ydim1 != ydim1_advec_mom_kernel_post_pre_advec_x_h || xdim2 != xdim2_advec_mom_kernel_post_pre_advec_x_h || ydim2 != ydim2_advec_mom_kernel_post_pre_advec_x_h || xdim3 != xdim3_advec_mom_kernel_post_pre_advec_x_h || ydim3 != ydim3_advec_mom_kernel_post_pre_advec_x_h || xdim4 != xdim4_advec_mom_kernel_post_pre_advec_x_h || ydim4 != ydim4_advec_mom_kernel_post_pre_advec_x_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel_post_pre_advec_x, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_post_pre_advec_x_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel_post_pre_advec_x, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_post_pre_advec_x_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel_post_pre_advec_x, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_post_pre_advec_x_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel_post_pre_advec_x, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_post_pre_advec_x_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel_post_pre_advec_x, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_post_pre_advec_x_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel_post_pre_advec_x, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_post_pre_advec_x_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel_post_pre_advec_x, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_post_pre_advec_x_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel_post_pre_advec_x, &ydim3, 
sizeof(int) ); ydim3_advec_mom_kernel_post_pre_advec_x_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_mom_kernel_post_pre_advec_x, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_post_pre_advec_x_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_mom_kernel_post_pre_advec_x, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_post_pre_advec_x_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ 
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[127].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_post_pre_advec_x<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[127].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[127].mpi_time += t2-t1; OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[127].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor 
*desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 127; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 127; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_post_pre_advec_x_execute; if (OPS_diags > 1) { ops_timing_realloc(127,"advec_mom_kernel_post_pre_advec_x"); } ops_enqueue_kernel(desc); } #endif
350b80f9b0f46e4319db5074f618333d31c45029.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <unitarization_links.h> #include <comm_quda.h> #include <gauge_fix_ovr_extra.h> #include <gauge_fix_ovr_hit_devf.cuh> #include <cub_helper.cuh> #include <index_helper.cuh> namespace quda { #ifdef GPU_GAUGE_ALG static int numParams = 18; #define LAUNCH_KERNEL_GAUGEFIX(kernel, tp, stream, arg, parity, ...) \ if (tp.aux.x == 0) { \ switch (tp.block.x) { \ case 256:hipLaunchKernelGGL(( kernel<0, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<0, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 768:hipLaunchKernelGGL(( kernel<0, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<0, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 1) { \ switch (tp.block.x) { \ case 256:hipLaunchKernelGGL(( kernel<1, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<1, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 768:hipLaunchKernelGGL(( kernel<1, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<1, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 2) 
{ \ switch (tp.block.x) { \ case 256:hipLaunchKernelGGL(( kernel<2, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<2, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 768:hipLaunchKernelGGL(( kernel<2, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<2, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 3) { \ switch (tp.block.x) { \ case 128:hipLaunchKernelGGL(( kernel<3, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 256:hipLaunchKernelGGL(( kernel<3, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 384:hipLaunchKernelGGL(( kernel<3, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<3, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 640:hipLaunchKernelGGL(( kernel<3, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 768:hipLaunchKernelGGL(( kernel<3, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 896:hipLaunchKernelGGL(( kernel<3, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<3, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 4) { \ switch 
(tp.block.x) { \ case 128:hipLaunchKernelGGL(( kernel<4, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 256:hipLaunchKernelGGL(( kernel<4, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 384:hipLaunchKernelGGL(( kernel<4, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<4, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 640:hipLaunchKernelGGL(( kernel<4, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 768:hipLaunchKernelGGL(( kernel<4, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 896:hipLaunchKernelGGL(( kernel<4, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<4, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 5) { \ switch (tp.block.x) { \ case 128:hipLaunchKernelGGL(( kernel<5, 32, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 256:hipLaunchKernelGGL(( kernel<5, 64, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 384:hipLaunchKernelGGL(( kernel<5, 96, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 512:hipLaunchKernelGGL(( kernel<5, 128, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 640:hipLaunchKernelGGL(( kernel<5, 160, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); 
break; \ case 768:hipLaunchKernelGGL(( kernel<5, 192, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 896:hipLaunchKernelGGL(( kernel<5, 224, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ case 1024:hipLaunchKernelGGL(( kernel<5, 256, __VA_ARGS__>), dim3(tp.grid.x), dim3(tp.block.x), tp.shared_bytes, stream, arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else { \ errorQuda("Not implemented for %d", tp.aux.x); \ } /** * @brief container to pass parameters for the gauge fixing quality kernel */ template <typename Gauge> struct GaugeFixQualityArg : public ReduceArg<double2> { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data) : ReduceArg<double2>(), dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; #ifdef MULTI_GPU border[dir] = data.R()[dir]; #endif } threads = X[0]*X[1]*X[2]*X[3]/2; } double getAction(){ return result_h[0].x; } double getTheta(){ return result_h[0].y; } }; /** * @brief Measure gauge fixing quality */ template<int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix_quality(GaugeFixQualityArg<Gauge> argQ){ typedef complex<Float> Cmplx; int idx_cb = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; double2 data = make_double2(0.0,0.0); while (idx_cb < argQ.threads) { int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = argQ.X[dr]; int x[4]; getCoords(x, idx_cb, X, parity); #ifdef MULTI_GPU #pragma unroll for ( int dr = 0; dr < 4; ++dr ) { x[dr] += argQ.border[dr]; X[dr] += 2 * argQ.border[dr]; } #endif Matrix<Cmplx,3> delta; setZero(&delta); //load upward links for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U = 
argQ.dataOr(mu, linkIndex(x, X), parity); delta -= U; } //18*gauge_dir data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x; //2 //load downward links for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U = argQ.dataOr(mu, linkIndexM1(x,X,mu), 1 - parity); delta += U; } //18*gauge_dir delta -= conj(delta); //18 SubTraceUnit(delta); //12 data.y += getRealTraceUVdagger(delta, delta); //35 //T=36*gauge_dir+65 idx_cb += blockDim.x * gridDim.x; } reduce2d<blockSize,2>(argQ, data); } /** * @brief Tunable object for the gauge fixing quality kernel */ template<typename Float, typename Gauge, int gauge_dir> class GaugeFixQuality : TunableLocalParity { GaugeFixQualityArg<Gauge> argQ; mutable char aux_string[128]; // used as a label in the autotuner private: bool tuneGridDim() const { return true; } public: GaugeFixQuality(GaugeFixQualityArg<Gauge> &argQ) : argQ(argQ) { } ~GaugeFixQuality () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); argQ.result_h[0] = make_double2(0.0,0.0); LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Float, Gauge, gauge_dir); qudaDeviceSynchronize(); if ( comm_size() != 1 ) comm_allreduce_array((double*)argQ.result_h, 2); argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads * comm_size()); argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads * comm_size()); } TuneKey tuneKey() const { std::stringstream vol; vol << argQ.X[0] << "x"; vol << argQ.X[1] << "x"; vol << argQ.X[2] << "x"; vol << argQ.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return (36LL * gauge_dir + 65LL) * 2 * argQ.threads; } // Only correct if there is no link reconstruction, no cub reduction accounted also //long long bytes() const { return (1)*2*gauge_dir*argQ.dataOr.Bytes(); }//no accounting the reduction!!!! 
argQ.dataOr.Bytes() return 0.... long long bytes() const { return 2LL * gauge_dir * 2 * argQ.threads * numParams * sizeof(Float); } //no accounting the reduction!!!! }; /** * @brief container to pass parameters for the gauge fixing kernel */ template <typename Float, typename Gauge> struct GaugeFixArg { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; cudaGaugeField &data; const Float relax_boost; GaugeFixArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost) : dataOr(dataOr), data(data), relax_boost(relax_boost) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; #ifdef MULTI_GPU border[dir] = data.R()[dir]; #endif } threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; /** * @brief Kernel to perform gauge fixing with overrelaxation for single-GPU */ template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix(GaugeFixArg<Float, Gauge> arg, int parity){ typedef complex<Float> Cmplx; int tid = (threadIdx.x + blockSize) % blockSize; int idx = blockIdx.x * blockSize + tid; if ( idx >= arg.threads ) return; // 8 threads per lattice site if ( ImplementationType < 3 ) { int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; getCoords(x, idx, X, parity); #ifdef MULTI_GPU #pragma unroll for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } #endif int mu = (threadIdx.x / blockSize); int oddbit = parity; if ( threadIdx.x >= blockSize * 4 ) { mu -= 4; x[mu] = (x[mu] - 1 + X[mu]) % X[mu]; oddbit = 1 - parity; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Cmplx,3> link = arg.dataOr(mu, idx, oddbit); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. 
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 8x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // write back the updated link (up or down, selected above)
    arg.dataOr(mu, idx, oddbit) = link;
  }
  // 4 threads per lattice site: each thread handles one direction, loading
  // both the upward link and the matching downward link itself
  else{
    int X[4];
#pragma unroll
    for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
    int x[4];
    getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
#pragma unroll
    for ( int dr = 0; dr < 4; ++dr ) {
      x[dr] += arg.border[dr];
      X[dr] += 2 * arg.border[dr];
    }
#endif
    int mu = (threadIdx.x / blockSize);
    idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    //load upward link
    Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity);

    // -mu neighbor (periodic wrap) carries the downward link, opposite parity
    x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
    int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    //load downward link
    Matrix<Cmplx,3> link1 = arg.dataOr(mu, idx1, 1 - parity);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 4x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // store both the updated upward and downward links
    arg.dataOr(mu, idx, parity) = link;
    arg.dataOr(mu, idx1, 1 - parity) = link1;
  }
}

/**
 * @brief Tunable object for the gauge fixing kernel
 */
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFix : Tunable {
  GaugeFixArg<Float, Gauge> arg;
  int parity;                   // parity currently being relaxed (set via setParity)
  mutable char aux_string[128]; // used as a label in the autotuner

  protected:
  // grid size for the current tuning state: 8 threads/site for the
  // aux.x <= 2 variants, 4 threads/site for aux.x > 2
  dim3 createGrid(const TuneParam &param) const {
    unsigned int blockx = param.block.x / 8;
    if (param.aux.x > 2) blockx = param.block.x / 4;
    unsigned int gx = (arg.threads + blockx - 1) / blockx;
    return dim3(gx, 1, 1);
  }

  bool advanceBlockDim (TuneParam &param) const {
    // Use param.aux.x to tune and save state for best kernel option
    // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!!
    // NOTE(review): this block-size/aux.x state machine is duplicated in
    // GaugeFixInteriorPoints and GaugeFixBorderPoints — keep the three in sync.
    const unsigned int min_threads0 = 32 * 8; // smallest block for the 8-threads/site variants
    const unsigned int min_threads1 = 32 * 4; // smallest block for the 4-threads/site variants
    const unsigned int max_threads = 1024;    // FIXME: use deviceProp.maxThreadsDim[0];
    const unsigned int atmadd = 0;
    unsigned int min_threads = min_threads0;
    param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
    if (param.aux.x > 2) min_threads = 32 * 4;
    // try the next larger block size for the current kernel variant
    param.block.x += min_threads;
    param.block.y = 1;
    param.grid = createGrid(param);

    if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
      param.shared_bytes = sharedBytesPerBlock(param);
      return true;
    } else if (param.aux.x == 0) {
      // block sizes exhausted: advance to the next kernel variant (aux.x)
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 1; // USE FOR ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 1) {
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 2) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 3; // USE FOR NO ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float);
      return true;
    } else if (param.aux.x == 3) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 4;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else if (param.aux.x == 4) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 5;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else {
      // all six kernel variants exhausted
      return false;
    }
  }

  private:
  unsigned int sharedBytesPerThread() const {
    return 0;
  }
  // shared-memory footprint of the kernel variant selected by param.aux.x
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    switch (param.aux.x) {
    case 0: return param.block.x * 4 * sizeof(Float);
    case 1: return param.block.x * 4 * sizeof(Float) / 8;
    case 2: return param.block.x * 4 * sizeof(Float) / 8;
    case 3: return param.block.x * 4 * sizeof(Float);
    default: return param.block.x * sizeof(Float);
    }
  }

  bool tuneSharedBytes() const {
    return false;
  } // Don't tune shared memory
  bool tuneGridDim() const {
    return false;
  } // Don't tune the grid dimensions.
  unsigned int minThreads() const {
    return arg.threads;
  }

  public:
  GaugeFix(GaugeFixArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
  ~GaugeFix () { }

  void setParity(const int par){
    parity = par;
  }

  void apply(const hipStream_t &stream){
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    LAUNCH_KERNEL_GAUGEFIX(computeFix, tp, stream, arg, parity, Float, Gauge, gauge_dir);
  }

  virtual void initTuneParam(TuneParam &param) const {
    param.block = dim3(256, 1, 1);
    param.aux.x = 0; // start tuning with the 8-threads/site no-atomicAdd variant
    param.grid = createGrid(param);
    param.shared_bytes = sharedBytesPerBlock(param);
  }

  virtual void defaultTuneParam(TuneParam &param) const {
    initTuneParam(param);
  }

  TuneKey tuneKey() const {
    std::stringstream vol;
    vol << arg.X[0] << "x";
    vol << arg.X[1] << "x";
    vol << arg.X[2] << "x";
    vol << arg.X[3];
    sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
    return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
  }

  std::string paramString(const TuneParam &param) const {
    std::stringstream ps(Tunable::paramString(param));
    ps << ", atomicadd=" << param.aux.x;
    return ps.str();
  }

  //need this: tuning overwrites the gauge field, so back it up / restore it
  void preTune() {
    arg.data.backup();
  }
  void postTune() {
    arg.data.restore();
  }
  long long flops() const {
    return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
  } // Only correct if there is no link reconstruction
  //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
  long long bytes() const {
    return 8LL * 2 * arg.threads * numParams * sizeof(Float);
  } //no accounting the reduction!!!!
};

#ifdef MULTI_GPU

/**
 * @brief container to pass parameters for the interior-points gauge fixing
 * kernel (multi-GPU path)
 */
template <typename Float, typename Gauge> struct GaugeFixInteriorPointsArg {
  int threads; // number of active threads required
  int X[4];    // grid dimensions of the interior region
#ifdef MULTI_GPU
  int border[4]; // offset skipped at each boundary
#endif
  Gauge dataOr;
  cudaGaugeField &data;
  const Float relax_boost;
  GaugeFixInteriorPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost)
    : dataOr(dataOr), data(data), relax_boost(relax_boost) {
#ifdef MULTI_GPU
    for ( int dir = 0; dir < 4; ++dir ) {
      if ( comm_dim_partitioned(dir)) border[dir] = data.R()[dir] + 1; //skip BORDER_RADIUS + face border point
      else border[dir] = 0;
    }
    for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir] - border[dir] * 2;
#else
    for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
#endif
    threads = X[0] * X[1] * X[2] * X[3] >> 1;
  }
};

/**
 * @brief Kernel to perform gauge fixing with overrelaxation in the interior points for multi-GPU implementation
 */
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> arg, int parity){
  int tid = (threadIdx.x + blockSize) % blockSize;
  int idx = blockIdx.x * blockSize + tid;
  if ( idx >= arg.threads ) return;
  typedef complex<Float> Complex;
  int X[4];
#pragma unroll
  for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
  int x[4];
#ifdef MULTI_GPU
  // decode the checkerboard index by hand: the border shift changes the
  // effective parity by p (sum of the border offsets mod 2)
  int za = (idx / (X[0] / 2));
  int zb = (za / X[1]);
  x[1] = za - zb * X[1];
  x[3] = (zb / X[2]);
  x[2] = zb - x[3] * X[2];
  int p = 0;
  for ( int dr = 0; dr < 4; ++dr ) p += arg.border[dr];
  p = p & 1;
  int x1odd = (x[1] + x[2] + x[3] + parity + p) & 1;
  //int x1odd = (x[1] + x[2] + x[3] + parity) & 1;
  x[0] = (2 * idx + x1odd) - za * X[0];
  // shift into the extended volume
  for ( int dr = 0; dr < 4; ++dr ) {
    x[dr] += arg.border[dr];
    X[dr] += 2 * arg.border[dr];
  }
#else
  getCoords(x, idx, X, parity);
#endif
  int mu = (threadIdx.x / blockSize);

  // 8 threads per lattice site
  if ( ImplementationType < 3 ) {
    // upper half of the thread block handles the downward links
    if ( threadIdx.x >= blockSize * 4 ) {
      mu -= 4;
      x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
      parity = 1 - parity;
    }
    idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Complex,3> link = arg.dataOr(mu, idx, parity);
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 8x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    arg.dataOr(mu, idx, parity) = link;
  }
  // 4 threads per lattice site: each thread loads both the up and down link
  else{
    idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Complex,3> link = arg.dataOr(mu, idx, parity);

    x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
    int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Complex,3> link1 = arg.dataOr(mu, idx1, 1 - parity);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 4x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // store both updated links
    arg.dataOr(mu, idx, parity) = link;
    arg.dataOr(mu, idx1, 1 - parity) = link1;
  }
}

/**
 * @brief Tunable object for the interior points of the gauge fixing
 * kernel in multi-GPU implementation
 */
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixInteriorPoints : Tunable {
  GaugeFixInteriorPointsArg<Float, Gauge> arg;
  int parity;                   // parity currently being relaxed
  mutable char aux_string[128]; // used as a label in the autotuner

  protected:
  // grid size: 8 threads/site for aux.x <= 2, 4 threads/site otherwise
  dim3 createGrid(const TuneParam &param) const {
    unsigned int blockx = param.block.x / 8;
    if (param.aux.x > 2) blockx = param.block.x / 4;
    unsigned int gx = (arg.threads + blockx - 1) / blockx;
    return dim3(gx, 1, 1);
  }

  bool advanceBlockDim (TuneParam &param) const {
    // Use param.aux.x to tune and save state for best kernel option
    // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!!
    // NOTE(review): same aux.x state machine as GaugeFix — keep in sync
    const unsigned int min_threads0 = 32 * 8;
    const unsigned int min_threads1 = 32 * 4;
    const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
    const unsigned int atmadd = 0;
    unsigned int min_threads = min_threads0;
    param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
    if (param.aux.x > 2) min_threads = 32 * 4;
    param.block.x += min_threads;
    param.block.y = 1;
    param.grid = createGrid(param);

    if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
      param.shared_bytes = sharedBytesPerBlock(param);
      return true;
    } else if (param.aux.x == 0) {
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 1; // USE FOR ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 1) {
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 2) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 3; // USE FOR NO ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float);
      return true;
    } else if (param.aux.x == 3) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 4;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else if (param.aux.x == 4) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 5;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else {
      return false;
    }
  }

  private:
  unsigned int sharedBytesPerThread() const {
    return 0;
  }
  // shared-memory footprint per kernel variant (indexed by param.aux.x)
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    switch (param.aux.x) {
    case 0: return param.block.x * 4 * sizeof(Float);
    case 1: return param.block.x * 4 * sizeof(Float) / 8;
    case 2: return param.block.x * 4 * sizeof(Float) / 8;
    case 3: return param.block.x * 4 * sizeof(Float);
    default: return param.block.x * sizeof(Float);
    }
  }

  bool tuneSharedBytes() const {
    return false;
  } // Don't tune shared memory
  bool tuneGridDim() const {
    return false;
  } // Don't tune the grid dimensions.
  unsigned int minThreads() const {
    return arg.threads;
  }

  public:
  GaugeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) {}
  ~GaugeFixInteriorPoints () { }

  void setParity(const int par) {
    parity = par;
  }

  void apply(const hipStream_t &stream) {
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    LAUNCH_KERNEL_GAUGEFIX(computeFixInteriorPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
  }

  virtual void initTuneParam(TuneParam &param) const {
    param.block = dim3(256, 1, 1);
    param.aux.x = 0;
    param.grid = createGrid(param);
    param.shared_bytes = sharedBytesPerBlock(param);
  }

  virtual void defaultTuneParam(TuneParam &param) const {
    initTuneParam(param);
  }

  TuneKey tuneKey() const {
    std::stringstream vol;
    vol << arg.X[0] << "x";
    vol << arg.X[1] << "x";
    vol << arg.X[2] << "x";
    vol << arg.X[3];
    sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
    return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
  }

  std::string paramString(const TuneParam &param) const {
    std::stringstream ps(Tunable::paramString(param));
    ps << ", atomicadd=" << param.aux.x;
    return ps.str();
  }

  //need this: tuning overwrites the gauge field
  void preTune() {
    arg.data.backup();
  }
  void postTune() {
    arg.data.restore();
  }
  long long flops() const {
    return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
  } // Only correct if there is no link reconstruction
  //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
  long long bytes() const {
    return 8LL * 2 * arg.threads * numParams * sizeof(Float);
  } // Only correct if there is no link reconstruction load+save
};

// declaration continues in the next chunk (struct GaugeFixBorderPointsArg)
template <typename Float, typename Gauge> struct
GaugeFixBorderPointsArg {
  int threads; // number of active threads required (set by PreCalculateLatticeIndices)
  int X[4];    // grid dimensions
  int border[4];
  int *borderpoints[2];    // per-parity device arrays of precomputed border-site indices
  int *faceindicessize[2];
  size_t faceVolume[4];
  size_t faceVolumeCB[4];
  Gauge dataOr;
  cudaGaugeField &data;
  const Float relax_boost;

  GaugeFixBorderPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost, size_t faceVolume_[4], size_t faceVolumeCB_[4])
    : dataOr(dataOr), data(data), relax_boost(relax_boost) {
    for ( int dir = 0; dir < 4; ++dir ) {
      X[dir] = data.X()[dir] - data.R()[dir] * 2;
      border[dir] = data.R()[dir];
    }
    /*for(int dir=0; dir<4; ++dir){
       if(comm_dim_partitioned(dir)) border[dir] = BORDER_RADIUS;
       else border[dir] = 0;
       }
       for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;*/
    for ( int dir = 0; dir < 4; ++dir ) {
      faceVolume[dir] = faceVolume_[dir];
      faceVolumeCB[dir] = faceVolumeCB_[dir];
    }
    // builds the borderpoints index lists and sets threads
    if ( comm_partitioned() ) PreCalculateLatticeIndices(faceVolume, faceVolumeCB, X, border, threads, borderpoints);
  }
};

/**
 * @brief Kernel to perform gauge fixing with overrelaxation in the border points for multi-GPU implementation
 */
template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> arg, int parity){
  typedef complex<Float> Cmplx;
  int tid = (threadIdx.x + blockSize) % blockSize;
  int idx = blockIdx.x * blockSize + tid;
  if ( idx >= arg.threads ) return;
  int mu = (threadIdx.x / blockSize);
  // map the flat thread index to a precomputed border-site lattice index
  idx = arg.borderpoints[parity][idx];
  // decode the full (non-checkerboard) lattice coordinates
  int X[4], x[4];
  x[3] = idx / (arg.X[0] * arg.X[1] * arg.X[2]);
  x[2] = (idx / (arg.X[0] * arg.X[1])) % arg.X[2];
  x[1] = (idx / arg.X[0]) % arg.X[1];
  x[0] = idx % arg.X[0];
#pragma unroll
  for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr];
#pragma unroll
  for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr] + 2 * arg.border[dr];

  // 8 threads per lattice site
  if ( ImplementationType < 3 ) {
    // upper half of the thread block handles the downward links
    if ( threadIdx.x >= blockSize * 4 ) {
      mu -= 4;
      x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
      parity = 1 - parity;
    }
    idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity);
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 8x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid);
    arg.dataOr(mu, idx, parity) = link;
  }
  // 4 threads per lattice site: each thread loads both the up and down link
  else{
    idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity);

    x[mu] = (x[mu] - 1 + X[mu]) % X[mu];
    int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
    Matrix<Cmplx,3> link1 = arg.dataOr(mu, idx1, 1 - parity);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // this implementation needs 4x more shared memory than the implementation using atomicadd
    if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd
    if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd.
    // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization
    if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid);
    // store both updated links
    arg.dataOr(mu, idx, parity) = link;
    arg.dataOr(mu, idx1, 1 - parity) = link1;
  }
}

/**
 * @brief Tunable object for the border points of the gauge fixing kernel in multi-GPU implementation
 */
template<typename Float, typename Gauge, int gauge_dir>
class GaugeFixBorderPoints : Tunable {
  GaugeFixBorderPointsArg<Float, Gauge> arg;
  int parity;                   // parity currently being relaxed
  mutable char aux_string[128]; // used as a label in the autotuner

  protected:
  // grid size: 8 threads/site for aux.x <= 2, 4 threads/site otherwise
  dim3 createGrid(const TuneParam &param) const {
    unsigned int blockx = param.block.x / 8;
    if (param.aux.x > 2) blockx = param.block.x / 4;
    unsigned int gx = (arg.threads + blockx - 1) / blockx;
    return dim3(gx, 1, 1);
  }

  bool advanceBlockDim(TuneParam &param) const {
    // Use param.aux.x to tune and save state for best kernel option
    // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!!
    // NOTE(review): same aux.x state machine as GaugeFix — keep in sync
    const unsigned int min_threads0 = 32 * 8;
    const unsigned int min_threads1 = 32 * 4;
    const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0];
    const unsigned int atmadd = 0;
    unsigned int min_threads = min_threads0;
    param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD
    if (param.aux.x > 2) min_threads = 32 * 4;
    param.block.x += min_threads;
    param.block.y = 1;
    param.grid = createGrid(param);

    if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) {
      param.shared_bytes = sharedBytesPerBlock(param);
      return true;
    } else if (param.aux.x == 0) {
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 1; // USE FOR ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 1) {
      param.block.x = min_threads0;
      param.block.y = 1;
      param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8;
      return true;
    } else if (param.aux.x == 2) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 3; // USE FOR NO ATOMIC ADD
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * 4 * sizeof(Float);
      return true;
    } else if (param.aux.x == 3) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 4;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else if (param.aux.x == 4) {
      param.block.x = min_threads1;
      param.block.y = 1;
      param.aux.x = 5;
      param.grid = createGrid(param);
      param.shared_bytes = param.block.x * sizeof(Float);
      return true;
    } else {
      return false;
    }
  }

  private:
  unsigned int sharedBytesPerThread() const {
    return 0;
  }
  // shared-memory footprint per kernel variant (indexed by param.aux.x)
  unsigned int sharedBytesPerBlock(const TuneParam &param) const {
    switch (param.aux.x) {
    case 0: return param.block.x * 4 * sizeof(Float);
    case 1: return param.block.x * 4 * sizeof(Float) / 8;
    case 2: return param.block.x * 4 * sizeof(Float) / 8;
    case 3: return param.block.x * 4 * sizeof(Float);
    default: return param.block.x * sizeof(Float);
    }
  }

  bool tuneSharedBytes() const {
    return false;
  } // Don't tune shared memory
  bool tuneGridDim() const {
    return false;
  } // Don't tune the grid dimensions.
  unsigned int minThreads() const {
    return arg.threads;
  }

  public:
  GaugeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) { }
  ~GaugeFixBorderPoints () {
    // release the index lists allocated by PreCalculateLatticeIndices
    if ( comm_partitioned() ) for ( int i = 0; i < 2; i++ ) pool_device_free(arg.borderpoints[i]);
  }

  void setParity(const int par){
    parity = par;
  }

  void apply(const hipStream_t &stream){
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    LAUNCH_KERNEL_GAUGEFIX(computeFixBorderPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir);
  }

  virtual void initTuneParam(TuneParam &param) const {
    param.block = dim3(256, 1, 1);
    param.aux.x = 0;
    param.grid = createGrid(param);
    param.shared_bytes = sharedBytesPerBlock(param);
  }

  virtual void defaultTuneParam(TuneParam &param) const {
    initTuneParam(param);
  }

  TuneKey tuneKey() const {
    std::stringstream vol;
    vol << arg.X[0] << "x";
    vol << arg.X[1] << "x";
    vol << arg.X[2] << "x";
    vol << arg.X[3];
    sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir);
    return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
  }

  std::string paramString(const TuneParam &param) const {
    std::stringstream ps(Tunable::paramString(param));
    ps << ", atomicadd=" << param.aux.x;
    return ps.str();
  }

  //need this: tuning overwrites the gauge field
  void preTune() {
    arg.data.backup();
  }
  void postTune() {
    arg.data.restore();
  }
  long long flops() const {
    return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads;
  } // Only correct if there is no link reconstruction
  //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save
  long long bytes() const {
    return 8LL * 2 * arg.threads * numParams * sizeof(Float);
  } // Only correct if there is no link reconstruction load+save
};

/**
 * @brief container to pass parameters to the face pack/unpack kernels
 */
template <typename Gauge> struct GaugeFixUnPackArg {
  int X[4]; // grid dimensions (halo excluded)
#ifdef MULTI_GPU
  int border[4];
#endif
  Gauge dataOr;
  GaugeFixUnPackArg(Gauge & dataOr, cudaGaugeField & data)
    : dataOr(dataOr) {
    for ( int dir = 0; dir < 4; ++dir ) {
      X[dir] = data.X()[dir] - data.R()[dir] * 2;
#ifdef MULTI_GPU
      border[dir] = data.R()[dir];
#endif
    }
  }
};

// Pack (pack==true) or unpack (pack==false) the links of the face adjacent to
// the lower boundary of direction `face` to/from the contiguous buffer `array`.
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackGhost(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if ( idx >= size ) return;
  int X[4];
  for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
  int x[4];
  int za, xodd;
  int borderid = 0; // lower boundary slice
  // temporarily flip parity while decoding the face coordinates; restored
  // below after the x[face] -= 1 shift into the ghost region
  parity = 1 - parity;
  switch ( face ) {
  case 0: //X FACE
    za = idx / ( X[1] / 2);
    x[3] = za / X[2];
    x[2] = za - x[3] * X[2];
    x[0] = borderid;
    xodd = (borderid + x[2] + x[3] + parity) & 1;
    x[1] = (2 * idx + xodd) - za * X[1];
    break;
  case 1: //Y FACE
    za = idx / ( X[0] / 2);
    x[3] = za / X[2];
    x[2] = za - x[3] * X[2];
    x[1] = borderid;
    xodd = (borderid + x[2] + x[3] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  case 2: //Z FACE
    za = idx / ( X[0] / 2);
    x[3] = za / X[1];
    x[1] = za - x[3] * X[1];
    x[2] = borderid;
    xodd = (borderid + x[1] + x[3] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  case 3: //T FACE
    za = idx / ( X[0] / 2);
    x[2] = za / X[1];
    x[1] = za - x[2] * X[1];
    x[3] = borderid;
    xodd = (borderid + x[1] + x[2] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  }
  for ( int dr = 0; dr < 4; ++dr ) {
    x[dr] += arg.border[dr];
    X[dr] += 2 * arg.border[dr];
  }
  // step into the ghost slice just below the local boundary
  x[face] -= 1;
  parity = 1 - parity;
  int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
  typedef complex<Float> Cmplx;
  typedef typename mapper<Float>::type RegType;
  RegType tmp[NElems];
  Cmplx data[9];
  if ( pack ) {
    // load the link, apply the reconstruction packing, and write NElems/2
    // complex values strided by `size` into the exchange buffer
    arg.dataOr.load(data, id, dir, parity);
    arg.dataOr.reconstruct.Pack(tmp, data, id);
    for ( int i = 0; i < NElems / 2;
          ++i ) {
      array[idx + size * i] = Cmplx(tmp[2*i+0], tmp[2*i+1]);
    }
  } else {
    // reverse direction: read the buffer, unpack, and save the link
    for ( int i = 0; i < NElems / 2; ++i ) {
      tmp[2*i+0] = array[idx + size * i].real();
      tmp[2*i+1] = array[idx + size * i].imag();
    }
    arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
    arg.dataOr.save(data, id, dir, parity);
  }
}

// Pack (pack==true) or unpack (pack==false) the links of the face at the top
// boundary of direction `face` (borderid = X[face]-1) to/from `array`.
template<int NElems, typename Float, typename Gauge, bool pack>
__global__ void Kernel_UnPackTop(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if ( idx >= size ) return;
  int X[4];
  for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr];
  int x[4];
  int za, xodd;
  int borderid = arg.X[face] - 1; // top boundary slice
  switch ( face ) {
  case 0: //X FACE
    za = idx / ( X[1] / 2);
    x[3] = za / X[2];
    x[2] = za - x[3] * X[2];
    x[0] = borderid;
    xodd = (borderid + x[2] + x[3] + parity) & 1;
    x[1] = (2 * idx + xodd) - za * X[1];
    break;
  case 1: //Y FACE
    za = idx / ( X[0] / 2);
    x[3] = za / X[2];
    x[2] = za - x[3] * X[2];
    x[1] = borderid;
    xodd = (borderid + x[2] + x[3] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  case 2: //Z FACE
    za = idx / ( X[0] / 2);
    x[3] = za / X[1];
    x[1] = za - x[3] * X[1];
    x[2] = borderid;
    xodd = (borderid + x[1] + x[3] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  case 3: //T FACE
    za = idx / ( X[0] / 2);
    x[2] = za / X[1];
    x[1] = za - x[2] * X[1];
    x[3] = borderid;
    xodd = (borderid + x[1] + x[2] + parity) & 1;
    x[0] = (2 * idx + xodd) - za * X[0];
    break;
  }
  for ( int dr = 0; dr < 4; ++dr ) {
    x[dr] += arg.border[dr];
    X[dr] += 2 * arg.border[dr];
  }
  int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1;
  typedef complex<Float> Cmplx;
  typedef typename mapper<Float>::type RegType;
  RegType tmp[NElems];
  Cmplx data[9];
  if ( pack ) {
    arg.dataOr.load(data, id, dir, parity);
    arg.dataOr.reconstruct.Pack(tmp, data, id);
    for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = Cmplx(tmp[2*i+0], tmp[2*i+1]);
  }
  else{
    for ( int i = 0; i < NElems / 2; ++i ) {
      tmp[2*i+0] = array[idx + size * i].real();
      tmp[2*i+1] = array[idx + size * i].imag();
    }
    arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R);
    arg.dataOr.save(data, id, dir, parity);
  }
}
#endif

/**
 * Driver for gauge fixing with overrelaxation (continues past this chunk).
 * Sets up unitarization constants, the quality/fix tunable objects, and (for
 * multi-GPU) the halo-exchange buffers and streams.
 */
template<typename Float, typename Gauge, int NElems, int gauge_dir>
void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data,
                     const int Nsteps, const int verbose_interval,
                     const Float relax_boost, const double tolerance,
                     const int reunit_interval, const int stopWtheta) {
  TimeProfile profileInternalGaugeFixOVR("InternalGaugeFixQudaOVR", false);

  profileInternalGaugeFixOVR.TPSTART(QUDA_PROFILE_COMPUTE);
  double flop = 0;
  double byte = 0;

  printfQuda("\tOverrelaxation boost parameter: %lf\n", (double)relax_boost);
  printfQuda("\tStop criterium: %lf\n", tolerance);
  if ( stopWtheta ) printfQuda("\tStop criterium method: theta\n");
  else printfQuda("\tStop criterium method: Delta\n");
  printfQuda("\tMaximum number of iterations: %d\n", Nsteps);
  printfQuda("\tReunitarize at every %d steps\n", reunit_interval);
  printfQuda("\tPrint convergence results at every %d steps\n", verbose_interval);

  // tolerances for the link reunitarization performed every reunit_interval
  const double unitarize_eps = 1e-14;
  const double max_error = 1e-10;
  const int reunit_allow_svd = 1;
  const int reunit_svd_only  = 0;
  const double svd_rel_error = 1e-6;
  const double svd_abs_error = 1e-6;
  setUnitarizeLinksConstants(unitarize_eps, max_error,
                             reunit_allow_svd, reunit_svd_only,
                             svd_rel_error, svd_abs_error);
  int num_failures = 0;
  int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int)));
  hipMemset(num_failures_dev, 0, sizeof(int));

  GaugeFixQualityArg<Gauge> argQ(dataOr, data);
  GaugeFixQuality<Float,Gauge, gauge_dir> GaugeFixQuality(argQ);

  GaugeFixArg<Float, Gauge> arg(dataOr, data, relax_boost);
  GaugeFix<Float,Gauge, gauge_dir> gaugeFix(arg);

#ifdef MULTI_GPU
  // host/device halo-exchange buffers per direction; "g" variants are for the
  // ghost (backward) faces
  void *send[4];
  void *recv[4];
  void *sendg[4];
  void *recvg[4];
  void *send_d[4];
  void *recv_d[4];
  void *sendg_d[4];
  void *recvg_d[4];
  void *hostbuffer_h[4];
  // 8 pack/copy streams (2 per direction) + 1 for the interior kernel
  hipStream_t GFStream[9];
size_t offset[4]; size_t bytes[4]; size_t faceVolume[4]; size_t faceVolumeCB[4]; // do the exchange MsgHandle *mh_recv_back[4]; MsgHandle *mh_recv_fwd[4]; MsgHandle *mh_send_fwd[4]; MsgHandle *mh_send_back[4]; int X[4]; dim3 block[4]; dim3 grid[4]; if ( comm_partitioned() ) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; if ( !commDimPartitioned(dir) && data.R()[dir] != 0 ) errorQuda("Not supported!\n"); } for ( int i = 0; i < 4; i++ ) { faceVolume[i] = 1; for ( int j = 0; j < 4; j++ ) { if ( i == j ) continue; faceVolume[i] *= X[j]; } faceVolumeCB[i] = faceVolume[i] / 2; } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; offset[d] = faceVolumeCB[d] * NElems; bytes[d] = sizeof(Float) * offset[d]; send_d[d] = device_malloc(bytes[d]); recv_d[d] = device_malloc(bytes[d]); sendg_d[d] = device_malloc(bytes[d]); recvg_d[d] = device_malloc(bytes[d]); hipStreamCreate(&GFStream[d]); hipStreamCreate(&GFStream[4 + d]); #ifndef GPU_COMMS hostbuffer_h[d] = (void*)pinned_malloc(4 * bytes[d]); #endif block[d] = make_uint3(128, 1, 1); grid[d] = make_uint3((faceVolumeCB[d] + block[d].x - 1) / block[d].x, 1, 1); } hipStreamCreate(&GFStream[8]); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS recv[d] = recv_d[d]; send[d] = send_d[d]; recvg[d] = recvg_d[d]; sendg[d] = sendg_d[d]; #else recv[d] = hostbuffer_h[d]; send[d] = static_cast<char*>(hostbuffer_h[d]) + bytes[d]; recvg[d] = static_cast<char*>(hostbuffer_h[d]) + 3 * bytes[d]; sendg[d] = static_cast<char*>(hostbuffer_h[d]) + 2 * bytes[d]; #endif mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]); mh_recv_fwd[d] = comm_declare_receive_relative(recvg[d], d, +1, bytes[d]); mh_send_back[d] = comm_declare_send_relative(sendg[d], d, -1, bytes[d]); mh_send_fwd[d] = comm_declare_send_relative(send[d], d, +1, bytes[d]); } } GaugeFixUnPackArg<Gauge> dataexarg(dataOr, data); GaugeFixBorderPointsArg<Float, Gauge> 
argBorder(dataOr, data, relax_boost, faceVolume, faceVolumeCB); GaugeFixBorderPoints<Float,Gauge, gauge_dir> gfixBorderPoints(argBorder); GaugeFixInteriorPointsArg<Float, Gauge> argInt(dataOr, data, relax_boost); GaugeFixInteriorPoints<Float,Gauge, gauge_dir> gfixIntPoints(argInt); #endif GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action0 = argQ.getAction(); printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta()); unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost); if ( num_failures > 0 ) { pool_device_free(num_failures_dev); errorQuda("Error in the unitarization\n"); exit(1); } hipMemset(num_failures_dev, 0, sizeof(int)); int iter = 0; for ( iter = 0; iter < Nsteps; iter++ ) { for ( int p = 0; p < 2; p++ ) { #ifndef MULTI_GPU gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); #else if ( !comm_partitioned() ) { gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); } else{ gfixIntPoints.setParity(p); gfixBorderPoints.setParity(p); //compute border points gfixBorderPoints.apply(0); flop += (double)gfixBorderPoints.flops(); byte += (double)gfixBorderPoints.bytes(); flop += (double)gfixIntPoints.flops(); byte += (double)gfixIntPoints.bytes(); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_start(mh_recv_back[d]); comm_start(mh_recv_fwd[d]); } //wait for the update to the halo points before start packing... 
qudaDeviceSynchronize(); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; //extract top face hipLaunchKernelGGL(( Kernel_UnPackTop<NElems, Float, Gauge, true>) , dim3(grid[d]), dim3(block[d]), 0, GFStream[d] , faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(send_d[d]), p, d, d); //extract bottom ghost hipLaunchKernelGGL(( Kernel_UnPackGhost<NElems, Float, Gauge, true>) , dim3(grid[d]), dim3(block[d]), 0, GFStream[4 + d] , faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(sendg_d[d]), 1 - p, d, d); } #ifdef GPU_COMMS for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; qudaStreamSynchronize(GFStream[d]); comm_start(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[4 + d]); comm_start(mh_send_back[d]); } #else for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; hipMemcpyAsync(send[d], send_d[d], bytes[d], hipMemcpyDeviceToHost, GFStream[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; hipMemcpyAsync(sendg[d], sendg_d[d], bytes[d], hipMemcpyDeviceToHost, GFStream[4 + d]); } #endif //compute interior points gfixIntPoints.apply(GFStream[8]); #ifndef GPU_COMMS for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; qudaStreamSynchronize(GFStream[d]); comm_start(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[4 + d]); comm_start(mh_send_back[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_recv_back[d]); hipMemcpyAsync(recv_d[d], recv[d], bytes[d], hipMemcpyHostToDevice, GFStream[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_recv_fwd[d]); hipMemcpyAsync(recvg_d[d], recvg[d], bytes[d], hipMemcpyHostToDevice, GFStream[4 + d]); } #endif for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS comm_wait(mh_recv_back[d]); #endif hipLaunchKernelGGL(( Kernel_UnPackGhost<NElems, Float, Gauge, false>) , dim3(grid[d]), dim3(block[d]), 0, 
GFStream[d] , faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recv_d[d]), p, d, d); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS comm_wait(mh_recv_fwd[d]); #endif hipLaunchKernelGGL(( Kernel_UnPackTop<NElems, Float, Gauge, false>) , dim3(grid[d]), dim3(block[d]), 0, GFStream[4 + d] , faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recvg_d[d]), 1 - p, d, d); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_send_back[d]); comm_wait(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[d]); qudaStreamSynchronize(GFStream[4 + d]); } qudaStreamSynchronize(GFStream[8]); } #endif /*gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); #ifdef MULTI_GPU if(comm_partitioned()){//exchange updated top face links in current parity for (int d=0; d<4; d++) { if (!commDimPartitioned(d)) continue; comm_start(mh_recv_back[d]); //extract top face Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(send_d[d]), p, d, d, true); #ifndef GPU_COMMS hipMemcpy(send[d], send_d[d], bytes[d], hipMemcpyDeviceToHost); #else qudaDeviceSynchronize(); #endif comm_start(mh_send_fwd[d]); comm_wait(mh_recv_back[d]); comm_wait(mh_send_fwd[d]); #ifndef GPU_COMMS hipMemcpy(recv_d[d], recv[d], bytes[d], hipMemcpyHostToDevice); #endif //inject top face in ghost Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recv_d[d]), p, d, d, false); } //exchange updated ghost links in opposite parity for (int d=0; d<4; d++) { if (!commDimPartitioned(d)) continue; comm_start(mh_recv_fwd[d]); Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(sendg_d[d]), 1-p, d, d, true); #ifndef GPU_COMMS hipMemcpy(sendg[d], sendg_d[d], bytes[d], hipMemcpyDeviceToHost); #else 
qudaDeviceSynchronize(); #endif comm_start(mh_send_back[d]); comm_wait(mh_recv_fwd[d]); comm_wait(mh_send_back[d]); #ifndef GPU_COMMS hipMemcpy(recvg_d[d], recvg[d], bytes[d], hipMemcpyHostToDevice); #endif Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recvg_d[d]), 1-p, d, d, false); } } #endif*/ } if ((iter % reunit_interval) == (reunit_interval - 1)) { unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost); if ( num_failures > 0 ) errorQuda("Error in the unitarization\n"); hipMemset(num_failures_dev, 0, sizeof(int)); flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes(); } GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action = argQ.getAction(); double diff = abs(action0 - action); if ((iter % verbose_interval) == (verbose_interval - 1)) printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; } else{ if ( diff < tolerance ) break; } action0 = action; } if ((iter % reunit_interval) != 0 ) { unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost); if ( num_failures > 0 ) errorQuda("Error in the unitarization\n"); hipMemset(num_failures_dev, 0, sizeof(int)); flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes(); } if ((iter % verbose_interval) != 0 ) { GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action = argQ.getAction(); double diff = abs(action0 - action); printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: 
%.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); } pool_device_free(num_failures_dev); #ifdef MULTI_GPU if ( comm_partitioned() ) { data.exchangeExtendedGhost(data.R(),false); for ( int d = 0; d < 4; d++ ) { if ( commDimPartitioned(d)) { comm_free(mh_send_fwd[d]); comm_free(mh_send_back[d]); comm_free(mh_recv_back[d]); comm_free(mh_recv_fwd[d]); device_free(send_d[d]); device_free(recv_d[d]); device_free(sendg_d[d]); device_free(recvg_d[d]); hipStreamDestroy(GFStream[d]); hipStreamDestroy(GFStream[4 + d]); #ifndef GPU_COMMS host_free(hostbuffer_h[d]); #endif } } hipStreamDestroy(GFStream[8]); } #endif checkCudaError(); qudaDeviceSynchronize(); profileInternalGaugeFixOVR.TPSTOP(QUDA_PROFILE_COMPUTE); if (getVerbosity() > QUDA_SUMMARIZE){ double secs = profileInternalGaugeFixOVR.Last(QUDA_PROFILE_COMPUTE); double gflops = (flop * 1e-9) / (secs); double gbytes = byte / (secs * 1e9); #ifdef MULTI_GPU printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size()); #else printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes); #endif } } template<typename Float, int NElems, typename Gauge> void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { if ( gauge_dir != 3 ) { printfQuda("Starting Landau gauge fixing...\n"); gaugefixingOVR<Float, Gauge, NElems, 4>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { printfQuda("Starting Coulomb gauge fixing...\n"); gaugefixingOVR<Float, Gauge, NElems, 3>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } } template<typename Float> void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const Float relax_boost, const double 
tolerance, const int reunit_interval, const int stopWtheta) { // Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12 if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { //printfQuda("QUDA_RECONSTRUCT_NO\n"); numParams = 18; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; gaugefixingOVR<Float, 18>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { //printfQuda("QUDA_RECONSTRUCT_12\n"); numParams = 12; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; gaugefixingOVR<Float, 12>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { //printfQuda("QUDA_RECONSTRUCT_8\n"); numParams = 8; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; gaugefixingOVR<Float, 8>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** * @brief Gauge fixing with overrelaxation with support for single and multi GPU. * @param[in,out] data, quda gauge field * @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing * @param[in] Nsteps, maximum number of steps to perform gauge fixing * @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this * @param[in] relax_boost, gauge fixing parameter of the overrelaxation method, most common value is 1.5 or 1.7. 
* @param[in] tolerance, torelance value to stop the method, if this value is zero then the method stops when iteration reachs the maximum number of steps defined by Nsteps * @param[in] reunit_interval, reunitarize gauge field when iteration count is a multiple of this * @param[in] stopWtheta, 0 for MILC criterium and 1 to use the theta value */ void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const double relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_HALF_PRECISION ) { errorQuda("Half precision not supported\n"); } if ( data.Precision() == QUDA_SINGLE_PRECISION ) { gaugefixingOVR<float> (data, gauge_dir, Nsteps, verbose_interval, (float)relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { gaugefixingOVR<double>(data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Gauge fixing has not been built"); #endif // GPU_GAUGE_ALG } } //namespace quda
350b80f9b0f46e4319db5074f618333d31c45029.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <unitarization_links.h> #include <comm_quda.h> #include <gauge_fix_ovr_extra.h> #include <gauge_fix_ovr_hit_devf.cuh> #include <cub_helper.cuh> #include <index_helper.cuh> namespace quda { #ifdef GPU_GAUGE_ALG static int numParams = 18; #define LAUNCH_KERNEL_GAUGEFIX(kernel, tp, stream, arg, parity, ...) \ if (tp.aux.x == 0) { \ switch (tp.block.x) { \ case 256: kernel<0, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<0, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<0, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<0, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 1) { \ switch (tp.block.x) { \ case 256: kernel<1, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<1, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<1, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<1, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 2) { \ switch (tp.block.x) { \ case 256: kernel<2, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<2, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<2, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, 
tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<2, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 3) { \ switch (tp.block.x) { \ case 128: kernel<3, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 256: kernel<3, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 384: kernel<3, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<3, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 640: kernel<3, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<3, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 896: kernel<3, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<3, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 4) { \ switch (tp.block.x) { \ case 128: kernel<4, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 256: kernel<4, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 384: kernel<4, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<4, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 640: kernel<4, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<4, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, 
parity); break; \ case 896: kernel<4, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<4, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else if (tp.aux.x == 5) { \ switch (tp.block.x) { \ case 128: kernel<5, 32, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 256: kernel<5, 64, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 384: kernel<5, 96, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 512: kernel<5, 128, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 640: kernel<5, 160, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 768: kernel<5, 192, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 896: kernel<5, 224, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ case 1024: kernel<5, 256, __VA_ARGS__><<<tp.grid.x, tp.block.x, tp.shared_bytes, stream>>>(arg, parity); break; \ default: errorQuda("%s not implemented for %d threads", #kernel, tp.block.x); \ } \ } else { \ errorQuda("Not implemented for %d", tp.aux.x); \ } /** * @brief container to pass parameters for the gauge fixing quality kernel */ template <typename Gauge> struct GaugeFixQualityArg : public ReduceArg<double2> { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data) : ReduceArg<double2>(), dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; #ifdef MULTI_GPU border[dir] = data.R()[dir]; #endif } threads = 
X[0]*X[1]*X[2]*X[3]/2; } double getAction(){ return result_h[0].x; } double getTheta(){ return result_h[0].y; } }; /** * @brief Measure gauge fixing quality */ template<int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix_quality(GaugeFixQualityArg<Gauge> argQ){ typedef complex<Float> Cmplx; int idx_cb = threadIdx.x + blockIdx.x * blockDim.x; int parity = threadIdx.y; double2 data = make_double2(0.0,0.0); while (idx_cb < argQ.threads) { int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = argQ.X[dr]; int x[4]; getCoords(x, idx_cb, X, parity); #ifdef MULTI_GPU #pragma unroll for ( int dr = 0; dr < 4; ++dr ) { x[dr] += argQ.border[dr]; X[dr] += 2 * argQ.border[dr]; } #endif Matrix<Cmplx,3> delta; setZero(&delta); //load upward links for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U = argQ.dataOr(mu, linkIndex(x, X), parity); delta -= U; } //18*gauge_dir data.x += -delta(0, 0).x - delta(1, 1).x - delta(2, 2).x; //2 //load downward links for ( int mu = 0; mu < gauge_dir; mu++ ) { Matrix<Cmplx,3> U = argQ.dataOr(mu, linkIndexM1(x,X,mu), 1 - parity); delta += U; } //18*gauge_dir delta -= conj(delta); //18 SubTraceUnit(delta); //12 data.y += getRealTraceUVdagger(delta, delta); //35 //T=36*gauge_dir+65 idx_cb += blockDim.x * gridDim.x; } reduce2d<blockSize,2>(argQ, data); } /** * @brief Tunable object for the gauge fixing quality kernel */ template<typename Float, typename Gauge, int gauge_dir> class GaugeFixQuality : TunableLocalParity { GaugeFixQualityArg<Gauge> argQ; mutable char aux_string[128]; // used as a label in the autotuner private: bool tuneGridDim() const { return true; } public: GaugeFixQuality(GaugeFixQualityArg<Gauge> &argQ) : argQ(argQ) { } ~GaugeFixQuality () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); argQ.result_h[0] = make_double2(0.0,0.0); LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Float, Gauge, gauge_dir); 
qudaDeviceSynchronize(); if ( comm_size() != 1 ) comm_allreduce_array((double*)argQ.result_h, 2); argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads * comm_size()); argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads * comm_size()); } TuneKey tuneKey() const { std::stringstream vol; vol << argQ.X[0] << "x"; vol << argQ.X[1] << "x"; vol << argQ.X[2] << "x"; vol << argQ.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } long long flops() const { return (36LL * gauge_dir + 65LL) * 2 * argQ.threads; } // Only correct if there is no link reconstruction, no cub reduction accounted also //long long bytes() const { return (1)*2*gauge_dir*argQ.dataOr.Bytes(); }//no accounting the reduction!!!! argQ.dataOr.Bytes() return 0.... long long bytes() const { return 2LL * gauge_dir * 2 * argQ.threads * numParams * sizeof(Float); } //no accounting the reduction!!!! }; /** * @brief container to pass parameters for the gauge fixing kernel */ template <typename Float, typename Gauge> struct GaugeFixArg { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; cudaGaugeField &data; const Float relax_boost; GaugeFixArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost) : dataOr(dataOr), data(data), relax_boost(relax_boost) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; #ifdef MULTI_GPU border[dir] = data.R()[dir]; #endif } threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; /** * @brief Kernel to perform gauge fixing with overrelaxation for single-GPU */ template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFix(GaugeFixArg<Float, Gauge> arg, int parity){ typedef complex<Float> Cmplx; int tid = (threadIdx.x + blockSize) % blockSize; int idx = blockIdx.x * blockSize + tid; if ( 
idx >= arg.threads ) return; // 8 threads per lattice site if ( ImplementationType < 3 ) { int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; getCoords(x, idx, X, parity); #ifdef MULTI_GPU #pragma unroll for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } #endif int mu = (threadIdx.x / blockSize); int oddbit = parity; if ( threadIdx.x >= blockSize * 4 ) { mu -= 4; x[mu] = (x[mu] - 1 + X[mu]) % X[mu]; oddbit = 1 - parity; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Cmplx,3> link = arg.dataOr(mu, idx, oddbit); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 8x more shared memory than the implementation using atomicadd if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. 
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); arg.dataOr(mu, idx, oddbit) = link; } // 4 threads per lattice site else{ int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; getCoords(x, idx, X, parity); #ifdef MULTI_GPU #pragma unroll for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } #endif int mu = (threadIdx.x / blockSize); idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; //load upward link Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity); x[mu] = (x[mu] - 1 + X[mu]) % X[mu]; int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; //load downward link Matrix<Cmplx,3> link1 = arg.dataOr(mu, idx1, 1 - parity); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 4x more shared memory than the implementation using atomicadd if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. 
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); arg.dataOr(mu, idx, parity) = link; arg.dataOr(mu, idx1, 1 - parity) = link1; } } /** * @brief Tunable object for the gauge fixing kernel */ template<typename Float, typename Gauge, int gauge_dir> class GaugeFix : Tunable { GaugeFixArg<Float, Gauge> arg; int parity; mutable char aux_string[128]; // used as a label in the autotuner protected: dim3 createGrid(const TuneParam &param) const { unsigned int blockx = param.block.x / 8; if (param.aux.x > 2) blockx = param.block.x / 4; unsigned int gx = (arg.threads + blockx - 1) / blockx; return dim3(gx, 1, 1); } bool advanceBlockDim (TuneParam &param) const { // Use param.aux.x to tune and save state for best kernel option // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!! 
const unsigned int min_threads0 = 32 * 8; const unsigned int min_threads1 = 32 * 4; const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0]; const unsigned int atmadd = 0; unsigned int min_threads = min_threads0; param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD if (param.aux.x > 2) min_threads = 32 * 4; param.block.x += min_threads; param.block.y = 1; param.grid = createGrid(param); if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) { param.shared_bytes = sharedBytesPerBlock(param); return true; } else if (param.aux.x == 0) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 1; // USE FOR ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 1) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 2) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 3; // USE FOR NO ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float); return true; } else if (param.aux.x == 3) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 4; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else if (param.aux.x == 4) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 5; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else { return false; } } private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { switch (param.aux.x) { case 0: return param.block.x * 4 * sizeof(Float); case 1: return param.block.x * 4 * sizeof(Float) / 8; case 2: return param.block.x * 4 * sizeof(Float) 
/ 8; case 3: return param.block.x * 4 * sizeof(Float); default: return param.block.x * sizeof(Float); } } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFix(GaugeFixArg<Float, Gauge> &arg) : arg(arg), parity(0) { } ~GaugeFix () { } void setParity(const int par){ parity = par; } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_GAUGEFIX(computeFix, tp, stream, arg, parity, Float, Gauge, gauge_dir); } virtual void initTuneParam(TuneParam &param) const { param.block = dim3(256, 1, 1); param.aux.x = 0; param.grid = createGrid(param); param.shared_bytes = sharedBytesPerBlock(param); } virtual void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps(Tunable::paramString(param)); ps << ", atomicadd=" << param.aux.x; return ps.str(); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads; } // Only correct if there is no link reconstruction //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save long long bytes() const { return 8LL * 2 * arg.threads * numParams * sizeof(Float); } //no accounting the reduction!!!! 
}; #ifdef MULTI_GPU template <typename Float, typename Gauge> struct GaugeFixInteriorPointsArg { int threads; // number of active threads required int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; cudaGaugeField &data; const Float relax_boost; GaugeFixInteriorPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost) : dataOr(dataOr), data(data), relax_boost(relax_boost) { #ifdef MULTI_GPU for ( int dir = 0; dir < 4; ++dir ) { if ( comm_dim_partitioned(dir)) border[dir] = data.R()[dir] + 1; //skip BORDER_RADIUS + face border point else border[dir] = 0; } for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir] - border[dir] * 2; #else for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; #endif threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; /** * @brief Kernel to perform gauge fixing with overrelaxation in the interior points for multi-GPU implementation */ template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> arg, int parity){ int tid = (threadIdx.x + blockSize) % blockSize; int idx = blockIdx.x * blockSize + tid; if ( idx >= arg.threads ) return; typedef complex<Float> Complex; int X[4]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; #ifdef MULTI_GPU int za = (idx / (X[0] / 2)); int zb = (za / X[1]); x[1] = za - zb * X[1]; x[3] = (zb / X[2]); x[2] = zb - x[3] * X[2]; int p = 0; for ( int dr = 0; dr < 4; ++dr ) p += arg.border[dr]; p = p & 1; int x1odd = (x[1] + x[2] + x[3] + parity + p) & 1; //int x1odd = (x[1] + x[2] + x[3] + parity) & 1; x[0] = (2 * idx + x1odd) - za * X[0]; for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } #else getCoords(x, idx, X, parity); #endif int mu = (threadIdx.x / blockSize); // 8 threads per lattice site if ( ImplementationType < 3 ) { if ( threadIdx.x >= blockSize * 4 ) { mu -= 4; x[mu] = 
(x[mu] - 1 + X[mu]) % X[mu]; parity = 1 - parity; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Complex,3> link = arg.dataOr(mu, idx, parity); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 8x more shared memory than the implementation using atomicadd if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); arg.dataOr(mu, idx, parity) = link; } // 4 threads per lattice site else{ idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Complex,3> link = arg.dataOr(mu, idx, parity); x[mu] = (x[mu] - 1 + X[mu]) % X[mu]; int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Complex,3> link1 = arg.dataOr(mu, idx1, 1 - parity); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 4x more shared memory than the implementation using atomicadd if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. 
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); arg.dataOr(mu, idx, parity) = link; arg.dataOr(mu, idx1, 1 - parity) = link1; } } /** * @brief Tunable object for the interior points of the gauge fixing * kernel in multi-GPU implementation */ template<typename Float, typename Gauge, int gauge_dir> class GaugeFixInteriorPoints : Tunable { GaugeFixInteriorPointsArg<Float, Gauge> arg; int parity; mutable char aux_string[128]; // used as a label in the autotuner protected: dim3 createGrid(const TuneParam &param) const { unsigned int blockx = param.block.x / 8; if (param.aux.x > 2) blockx = param.block.x / 4; unsigned int gx = (arg.threads + blockx - 1) / blockx; return dim3(gx, 1, 1); } bool advanceBlockDim (TuneParam &param) const { // Use param.aux.x to tune and save state for best kernel option // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!! 
const unsigned int min_threads0 = 32 * 8; const unsigned int min_threads1 = 32 * 4; const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0]; const unsigned int atmadd = 0; unsigned int min_threads = min_threads0; param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD if (param.aux.x > 2) min_threads = 32 * 4; param.block.x += min_threads; param.block.y = 1; param.grid = createGrid(param); if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) { param.shared_bytes = sharedBytesPerBlock(param); return true; } else if (param.aux.x == 0) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 1; // USE FOR ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 1) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 2) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 3; // USE FOR NO ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float); return true; } else if (param.aux.x == 3) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 4; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else if (param.aux.x == 4) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 5; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else { return false; } } private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { switch (param.aux.x) { case 0: return param.block.x * 4 * sizeof(Float); case 1: return param.block.x * 4 * sizeof(Float) / 8; case 2: return param.block.x * 4 * sizeof(Float) 
/ 8; case 3: return param.block.x * 4 * sizeof(Float); default: return param.block.x * sizeof(Float); } } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixInteriorPoints(GaugeFixInteriorPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) {} ~GaugeFixInteriorPoints () { } void setParity(const int par) { parity = par; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_GAUGEFIX(computeFixInteriorPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir); } virtual void initTuneParam(TuneParam &param) const { param.block = dim3(256, 1, 1); param.aux.x = 0; param.grid = createGrid(param); param.shared_bytes = sharedBytesPerBlock(param); } virtual void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps(Tunable::paramString(param)); ps << ", atomicadd=" << param.aux.x; return ps.str(); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads; } // Only correct if there is no link reconstruction //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save long long bytes() const { return 8LL * 2 * arg.threads * numParams * sizeof(Float); } // Only correct if there is no link reconstruction load+save }; template <typename Float, typename Gauge> struct 
GaugeFixBorderPointsArg { int threads; // number of active threads required int X[4]; // grid dimensions int border[4]; int *borderpoints[2]; int *faceindicessize[2]; size_t faceVolume[4]; size_t faceVolumeCB[4]; Gauge dataOr; cudaGaugeField &data; const Float relax_boost; GaugeFixBorderPointsArg(Gauge & dataOr, cudaGaugeField & data, const Float relax_boost, size_t faceVolume_[4], size_t faceVolumeCB_[4]) : dataOr(dataOr), data(data), relax_boost(relax_boost) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; border[dir] = data.R()[dir]; } /*for(int dir=0; dir<4; ++dir){ if(comm_dim_partitioned(dir)) border[dir] = BORDER_RADIUS; else border[dir] = 0; } for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir] - border[dir]*2;*/ for ( int dir = 0; dir < 4; ++dir ) { faceVolume[dir] = faceVolume_[dir]; faceVolumeCB[dir] = faceVolumeCB_[dir]; } if ( comm_partitioned() ) PreCalculateLatticeIndices(faceVolume, faceVolumeCB, X, border, threads, borderpoints); } }; /** * @brief Kernel to perform gauge fixing with overrelaxation in the border points for multi-GPU implementation */ template<int ImplementationType, int blockSize, typename Float, typename Gauge, int gauge_dir> __global__ void computeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> arg, int parity){ typedef complex<Float> Cmplx; int tid = (threadIdx.x + blockSize) % blockSize; int idx = blockIdx.x * blockSize + tid; if ( idx >= arg.threads ) return; int mu = (threadIdx.x / blockSize); idx = arg.borderpoints[parity][idx]; int X[4], x[4]; x[3] = idx / (arg.X[0] * arg.X[1] * arg.X[2]); x[2] = (idx / (arg.X[0] * arg.X[1])) % arg.X[2]; x[1] = (idx / arg.X[0]) % arg.X[1]; x[0] = idx % arg.X[0]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr]; #pragma unroll for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr] + 2 * arg.border[dr]; // 8 threads per lattice site if ( ImplementationType < 3 ) { if ( threadIdx.x >= blockSize * 4 ) { mu -= 4; x[mu] = (x[mu] - 1 + 
X[mu]) % X[mu]; parity = 1 - parity; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 8x more shared memory than the implementation using atomicadd if ( ImplementationType == 0 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 1 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); // 8 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 2 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, arg.relax_boost, tid); arg.dataOr(mu, idx, parity) = link; } // 4 threads per lattice site else{ idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Cmplx,3> link = arg.dataOr(mu, idx, parity); x[mu] = (x[mu] - 1 + X[mu]) % X[mu]; int idx1 = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; Matrix<Cmplx,3> link1 = arg.dataOr(mu, idx1, 1 - parity); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. // this implementation needs 4x more shared memory than the implementation using atomicadd if ( ImplementationType == 3 ) GaugeFixHit_NoAtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory using atomicadd if ( ImplementationType == 4 ) GaugeFixHit_AtomicAdd<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); // 4 treads per lattice site, the reduction is performed by shared memory without using atomicadd. 
// uses the same amount of shared memory as the atomicadd implementation with more thread block synchronization if ( ImplementationType == 5 ) GaugeFixHit_NoAtomicAdd_LessSM<blockSize, Float, gauge_dir, 3>(link, link1, arg.relax_boost, tid); arg.dataOr(mu, idx, parity) = link; arg.dataOr(mu, idx1, 1 - parity) = link1; } } /** * @brief Tunable object for the border points of the gauge fixing kernel in multi-GPU implementation */ template<typename Float, typename Gauge, int gauge_dir> class GaugeFixBorderPoints : Tunable { GaugeFixBorderPointsArg<Float, Gauge> arg; int parity; mutable char aux_string[128]; // used as a label in the autotuner protected: dim3 createGrid(const TuneParam &param) const { unsigned int blockx = param.block.x / 8; if (param.aux.x > 2) blockx = param.block.x / 4; unsigned int gx = (arg.threads + blockx - 1) / blockx; return dim3(gx, 1, 1); } bool advanceBlockDim(TuneParam &param) const { // Use param.aux.x to tune and save state for best kernel option // to make use or not of atomicAdd operations and 4 or 8 threads per lattice site!!! 
const unsigned int min_threads0 = 32 * 8; const unsigned int min_threads1 = 32 * 4; const unsigned int max_threads = 1024; // FIXME: use deviceProp.maxThreadsDim[0]; const unsigned int atmadd = 0; unsigned int min_threads = min_threads0; param.aux.x += atmadd; // USE TO SELECT BEST KERNEL OPTION WITH/WITHOUT USING ATOMICADD if (param.aux.x > 2) min_threads = 32 * 4; param.block.x += min_threads; param.block.y = 1; param.grid = createGrid(param); if ((param.block.x >= min_threads) && (param.block.x <= max_threads)) { param.shared_bytes = sharedBytesPerBlock(param); return true; } else if (param.aux.x == 0) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 1; // USE FOR ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 1) { param.block.x = min_threads0; param.block.y = 1; param.aux.x = 2; // USE FOR NO ATOMIC ADD and LESS SHARED MEM param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float) / 8; return true; } else if (param.aux.x == 2) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 3; // USE FOR NO ATOMIC ADD param.grid = createGrid(param); param.shared_bytes = param.block.x * 4 * sizeof(Float); return true; } else if (param.aux.x == 3) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 4; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else if (param.aux.x == 4) { param.block.x = min_threads1; param.block.y = 1; param.aux.x = 5; param.grid = createGrid(param); param.shared_bytes = param.block.x * sizeof(Float); return true; } else { return false; } } private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { switch (param.aux.x) { case 0: return param.block.x * 4 * sizeof(Float); case 1: return param.block.x * 4 * sizeof(Float) / 8; case 2: return param.block.x * 4 * sizeof(Float) 
/ 8; case 3: return param.block.x * 4 * sizeof(Float); default: return param.block.x * sizeof(Float); } } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: GaugeFixBorderPoints(GaugeFixBorderPointsArg<Float, Gauge> &arg) : arg(arg), parity(0) { } ~GaugeFixBorderPoints () { if ( comm_partitioned() ) for ( int i = 0; i < 2; i++ ) pool_device_free(arg.borderpoints[i]); } void setParity(const int par){ parity = par; } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); LAUNCH_KERNEL_GAUGEFIX(computeFixBorderPoints, tp, stream, arg, parity, Float, Gauge, gauge_dir); } virtual void initTuneParam(TuneParam &param) const { param.block = dim3(256, 1, 1); param.aux.x = 0; param.grid = createGrid(param); param.shared_bytes = sharedBytesPerBlock(param); } virtual void defaultTuneParam(TuneParam &param) const { initTuneParam(param); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%lu,gaugedir=%d",arg.threads,sizeof(Float),gauge_dir); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps(Tunable::paramString(param)); ps << ", atomicadd=" << param.aux.x; return ps.str(); } //need this void preTune() { arg.data.backup(); } void postTune() { arg.data.restore(); } long long flops() const { return 3LL * (22 + 28 * gauge_dir + 224 * 3) * arg.threads; } // Only correct if there is no link reconstruction //long long bytes() const { return (1)*8*2*arg.dataOr.Bytes(); } // Only correct if there is no link reconstruction load+save long long bytes() const { return 8LL * 2 * arg.threads * numParams * sizeof(Float); } // Only correct if there is no link 
reconstruction load+save }; template <typename Gauge> struct GaugeFixUnPackArg { int X[4]; // grid dimensions #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; GaugeFixUnPackArg(Gauge & dataOr, cudaGaugeField & data) : dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; #ifdef MULTI_GPU border[dir] = data.R()[dir]; #endif } } }; template<int NElems, typename Float, typename Gauge, bool pack> __global__ void Kernel_UnPackGhost(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= size ) return; int X[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; int za, xodd; int borderid = 0; parity = 1 - parity; switch ( face ) { case 0: //X FACE za = idx / ( X[1] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[0] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[1] = (2 * idx + xodd) - za * X[1]; break; case 1: //Y FACE za = idx / ( X[0] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[1] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 2: //Z FACE za = idx / ( X[0] / 2); x[3] = za / X[1]; x[1] = za - x[3] * X[1]; x[2] = borderid; xodd = (borderid + x[1] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 3: //T FACE za = idx / ( X[0] / 2); x[2] = za / X[1]; x[1] = za - x[2] * X[1]; x[3] = borderid; xodd = (borderid + x[1] + x[2] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; } for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } x[face] -= 1; parity = 1 - parity; int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; typedef complex<Float> Cmplx; typedef typename mapper<Float>::type RegType; RegType tmp[NElems]; Cmplx data[9]; if ( pack ) { arg.dataOr.load(data, id, dir, parity); arg.dataOr.reconstruct.Pack(tmp, data, id); for ( int i = 0; i < NElems / 2; 
++i ) { array[idx + size * i] = Cmplx(tmp[2*i+0], tmp[2*i+1]); } } else { for ( int i = 0; i < NElems / 2; ++i ) { tmp[2*i+0] = array[idx + size * i].real(); tmp[2*i+1] = array[idx + size * i].imag(); } arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R); arg.dataOr.save(data, id, dir, parity); } } template<int NElems, typename Float, typename Gauge, bool pack> __global__ void Kernel_UnPackTop(int size, GaugeFixUnPackArg<Gauge> arg, complex<Float> *array, int parity, int face, int dir){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= size ) return; int X[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; int za, xodd; int borderid = arg.X[face] - 1; switch ( face ) { case 0: //X FACE za = idx / ( X[1] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[0] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[1] = (2 * idx + xodd) - za * X[1]; break; case 1: //Y FACE za = idx / ( X[0] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[1] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 2: //Z FACE za = idx / ( X[0] / 2); x[3] = za / X[1]; x[1] = za - x[3] * X[1]; x[2] = borderid; xodd = (borderid + x[1] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 3: //T FACE za = idx / ( X[0] / 2); x[2] = za / X[1]; x[1] = za - x[2] * X[1]; x[3] = borderid; xodd = (borderid + x[1] + x[2] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; } for ( int dr = 0; dr < 4; ++dr ) { x[dr] += arg.border[dr]; X[dr] += 2 * arg.border[dr]; } int id = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]) >> 1; typedef complex<Float> Cmplx; typedef typename mapper<Float>::type RegType; RegType tmp[NElems]; Cmplx data[9]; if ( pack ) { arg.dataOr.load(data, id, dir, parity); arg.dataOr.reconstruct.Pack(tmp, data, id); for ( int i = 0; i < NElems / 2; ++i ) array[idx + size * i] = Cmplx(tmp[2*i+0], tmp[2*i+1]); } else{ for ( int i = 0; i < NElems / 
2; ++i ) { tmp[2*i+0] = array[idx + size * i].real(); tmp[2*i+1] = array[idx + size * i].imag(); } arg.dataOr.reconstruct.Unpack(data, tmp, id, dir, 0, arg.dataOr.X, arg.dataOr.R); arg.dataOr.save(data, id, dir, parity); } } #endif template<typename Float, typename Gauge, int NElems, int gauge_dir> void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data, const int Nsteps, const int verbose_interval, const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { TimeProfile profileInternalGaugeFixOVR("InternalGaugeFixQudaOVR", false); profileInternalGaugeFixOVR.TPSTART(QUDA_PROFILE_COMPUTE); double flop = 0; double byte = 0; printfQuda("\tOverrelaxation boost parameter: %lf\n", (double)relax_boost); printfQuda("\tStop criterium: %lf\n", tolerance); if ( stopWtheta ) printfQuda("\tStop criterium method: theta\n"); else printfQuda("\tStop criterium method: Delta\n"); printfQuda("\tMaximum number of iterations: %d\n", Nsteps); printfQuda("\tReunitarize at every %d steps\n", reunit_interval); printfQuda("\tPrint convergence results at every %d steps\n", verbose_interval); const double unitarize_eps = 1e-14; const double max_error = 1e-10; const int reunit_allow_svd = 1; const int reunit_svd_only = 0; const double svd_rel_error = 1e-6; const double svd_abs_error = 1e-6; setUnitarizeLinksConstants(unitarize_eps, max_error, reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error); int num_failures = 0; int* num_failures_dev = static_cast<int*>(pool_device_malloc(sizeof(int))); cudaMemset(num_failures_dev, 0, sizeof(int)); GaugeFixQualityArg<Gauge> argQ(dataOr, data); GaugeFixQuality<Float,Gauge, gauge_dir> GaugeFixQuality(argQ); GaugeFixArg<Float, Gauge> arg(dataOr, data, relax_boost); GaugeFix<Float,Gauge, gauge_dir> gaugeFix(arg); #ifdef MULTI_GPU void *send[4]; void *recv[4]; void *sendg[4]; void *recvg[4]; void *send_d[4]; void *recv_d[4]; void *sendg_d[4]; void *recvg_d[4]; void *hostbuffer_h[4]; cudaStream_t GFStream[9]; 
size_t offset[4]; size_t bytes[4]; size_t faceVolume[4]; size_t faceVolumeCB[4]; // do the exchange MsgHandle *mh_recv_back[4]; MsgHandle *mh_recv_fwd[4]; MsgHandle *mh_send_fwd[4]; MsgHandle *mh_send_back[4]; int X[4]; dim3 block[4]; dim3 grid[4]; if ( comm_partitioned() ) { for ( int dir = 0; dir < 4; ++dir ) { X[dir] = data.X()[dir] - data.R()[dir] * 2; if ( !commDimPartitioned(dir) && data.R()[dir] != 0 ) errorQuda("Not supported!\n"); } for ( int i = 0; i < 4; i++ ) { faceVolume[i] = 1; for ( int j = 0; j < 4; j++ ) { if ( i == j ) continue; faceVolume[i] *= X[j]; } faceVolumeCB[i] = faceVolume[i] / 2; } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; offset[d] = faceVolumeCB[d] * NElems; bytes[d] = sizeof(Float) * offset[d]; send_d[d] = device_malloc(bytes[d]); recv_d[d] = device_malloc(bytes[d]); sendg_d[d] = device_malloc(bytes[d]); recvg_d[d] = device_malloc(bytes[d]); cudaStreamCreate(&GFStream[d]); cudaStreamCreate(&GFStream[4 + d]); #ifndef GPU_COMMS hostbuffer_h[d] = (void*)pinned_malloc(4 * bytes[d]); #endif block[d] = make_uint3(128, 1, 1); grid[d] = make_uint3((faceVolumeCB[d] + block[d].x - 1) / block[d].x, 1, 1); } cudaStreamCreate(&GFStream[8]); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS recv[d] = recv_d[d]; send[d] = send_d[d]; recvg[d] = recvg_d[d]; sendg[d] = sendg_d[d]; #else recv[d] = hostbuffer_h[d]; send[d] = static_cast<char*>(hostbuffer_h[d]) + bytes[d]; recvg[d] = static_cast<char*>(hostbuffer_h[d]) + 3 * bytes[d]; sendg[d] = static_cast<char*>(hostbuffer_h[d]) + 2 * bytes[d]; #endif mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]); mh_recv_fwd[d] = comm_declare_receive_relative(recvg[d], d, +1, bytes[d]); mh_send_back[d] = comm_declare_send_relative(sendg[d], d, -1, bytes[d]); mh_send_fwd[d] = comm_declare_send_relative(send[d], d, +1, bytes[d]); } } GaugeFixUnPackArg<Gauge> dataexarg(dataOr, data); GaugeFixBorderPointsArg<Float, Gauge> 
argBorder(dataOr, data, relax_boost, faceVolume, faceVolumeCB); GaugeFixBorderPoints<Float,Gauge, gauge_dir> gfixBorderPoints(argBorder); GaugeFixInteriorPointsArg<Float, Gauge> argInt(dataOr, data, relax_boost); GaugeFixInteriorPoints<Float,Gauge, gauge_dir> gfixIntPoints(argInt); #endif GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action0 = argQ.getAction(); printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta()); unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost); if ( num_failures > 0 ) { pool_device_free(num_failures_dev); errorQuda("Error in the unitarization\n"); exit(1); } cudaMemset(num_failures_dev, 0, sizeof(int)); int iter = 0; for ( iter = 0; iter < Nsteps; iter++ ) { for ( int p = 0; p < 2; p++ ) { #ifndef MULTI_GPU gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); #else if ( !comm_partitioned() ) { gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); } else{ gfixIntPoints.setParity(p); gfixBorderPoints.setParity(p); //compute border points gfixBorderPoints.apply(0); flop += (double)gfixBorderPoints.flops(); byte += (double)gfixBorderPoints.bytes(); flop += (double)gfixIntPoints.flops(); byte += (double)gfixIntPoints.bytes(); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_start(mh_recv_back[d]); comm_start(mh_recv_fwd[d]); } //wait for the update to the halo points before start packing... 
qudaDeviceSynchronize(); for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; //extract top face Kernel_UnPackTop<NElems, Float, Gauge, true> <<< grid[d], block[d], 0, GFStream[d] >>> (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(send_d[d]), p, d, d); //extract bottom ghost Kernel_UnPackGhost<NElems, Float, Gauge, true> <<< grid[d], block[d], 0, GFStream[4 + d] >>> (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(sendg_d[d]), 1 - p, d, d); } #ifdef GPU_COMMS for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; qudaStreamSynchronize(GFStream[d]); comm_start(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[4 + d]); comm_start(mh_send_back[d]); } #else for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; cudaMemcpyAsync(send[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost, GFStream[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; cudaMemcpyAsync(sendg[d], sendg_d[d], bytes[d], cudaMemcpyDeviceToHost, GFStream[4 + d]); } #endif //compute interior points gfixIntPoints.apply(GFStream[8]); #ifndef GPU_COMMS for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; qudaStreamSynchronize(GFStream[d]); comm_start(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[4 + d]); comm_start(mh_send_back[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_recv_back[d]); cudaMemcpyAsync(recv_d[d], recv[d], bytes[d], cudaMemcpyHostToDevice, GFStream[d]); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_recv_fwd[d]); cudaMemcpyAsync(recvg_d[d], recvg[d], bytes[d], cudaMemcpyHostToDevice, GFStream[4 + d]); } #endif for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS comm_wait(mh_recv_back[d]); #endif Kernel_UnPackGhost<NElems, Float, Gauge, false> <<< grid[d], block[d], 0, GFStream[d] >>> (faceVolumeCB[d], dataexarg, 
reinterpret_cast<complex<Float>*>(recv_d[d]), p, d, d); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; #ifdef GPU_COMMS comm_wait(mh_recv_fwd[d]); #endif Kernel_UnPackTop<NElems, Float, Gauge, false> <<< grid[d], block[d], 0, GFStream[4 + d] >>> (faceVolumeCB[d], dataexarg, reinterpret_cast<complex<Float>*>(recvg_d[d]), 1 - p, d, d); } for ( int d = 0; d < 4; d++ ) { if ( !commDimPartitioned(d)) continue; comm_wait(mh_send_back[d]); comm_wait(mh_send_fwd[d]); qudaStreamSynchronize(GFStream[d]); qudaStreamSynchronize(GFStream[4 + d]); } qudaStreamSynchronize(GFStream[8]); } #endif /*gaugeFix.setParity(p); gaugeFix.apply(0); flop += (double)gaugeFix.flops(); byte += (double)gaugeFix.bytes(); #ifdef MULTI_GPU if(comm_partitioned()){//exchange updated top face links in current parity for (int d=0; d<4; d++) { if (!commDimPartitioned(d)) continue; comm_start(mh_recv_back[d]); //extract top face Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(send_d[d]), p, d, d, true); #ifndef GPU_COMMS cudaMemcpy(send[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost); #else qudaDeviceSynchronize(); #endif comm_start(mh_send_fwd[d]); comm_wait(mh_recv_back[d]); comm_wait(mh_send_fwd[d]); #ifndef GPU_COMMS cudaMemcpy(recv_d[d], recv[d], bytes[d], cudaMemcpyHostToDevice); #endif //inject top face in ghost Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recv_d[d]), p, d, d, false); } //exchange updated ghost links in opposite parity for (int d=0; d<4; d++) { if (!commDimPartitioned(d)) continue; comm_start(mh_recv_fwd[d]); Kernel_UnPackGhost<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(sendg_d[d]), 1-p, d, d, true); #ifndef GPU_COMMS cudaMemcpy(sendg[d], sendg_d[d], bytes[d], cudaMemcpyDeviceToHost); #else qudaDeviceSynchronize(); #endif comm_start(mh_send_back[d]); 
comm_wait(mh_recv_fwd[d]); comm_wait(mh_send_back[d]); #ifndef GPU_COMMS cudaMemcpy(recvg_d[d], recvg[d], bytes[d], cudaMemcpyHostToDevice); #endif Kernel_UnPackTop<NElems, Float, Gauge><<<grid[d], block[d]>>>(faceVolumeCB[d], dataexarg, reinterpret_cast<Float*>(recvg_d[d]), 1-p, d, d, false); } } #endif*/ } if ((iter % reunit_interval) == (reunit_interval - 1)) { unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost); if ( num_failures > 0 ) errorQuda("Error in the unitarization\n"); cudaMemset(num_failures_dev, 0, sizeof(int)); flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes(); } GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action = argQ.getAction(); double diff = abs(action0 - action); if ((iter % verbose_interval) == (verbose_interval - 1)) printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; } else{ if ( diff < tolerance ) break; } action0 = action; } if ((iter % reunit_interval) != 0 ) { unitarizeLinks(data, data, num_failures_dev); qudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost); if ( num_failures > 0 ) errorQuda("Error in the unitarization\n"); cudaMemset(num_failures_dev, 0, sizeof(int)); flop += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; byte += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes(); } if ((iter % verbose_interval) != 0 ) { GaugeFixQuality.apply(0); flop += (double)GaugeFixQuality.flops(); byte += (double)GaugeFixQuality.bytes(); double action = argQ.getAction(); double diff = abs(action0 - action); printfQuda("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff); 
} pool_device_free(num_failures_dev); #ifdef MULTI_GPU if ( comm_partitioned() ) { data.exchangeExtendedGhost(data.R(),false); for ( int d = 0; d < 4; d++ ) { if ( commDimPartitioned(d)) { comm_free(mh_send_fwd[d]); comm_free(mh_send_back[d]); comm_free(mh_recv_back[d]); comm_free(mh_recv_fwd[d]); device_free(send_d[d]); device_free(recv_d[d]); device_free(sendg_d[d]); device_free(recvg_d[d]); cudaStreamDestroy(GFStream[d]); cudaStreamDestroy(GFStream[4 + d]); #ifndef GPU_COMMS host_free(hostbuffer_h[d]); #endif } } cudaStreamDestroy(GFStream[8]); } #endif checkCudaError(); qudaDeviceSynchronize(); profileInternalGaugeFixOVR.TPSTOP(QUDA_PROFILE_COMPUTE); if (getVerbosity() > QUDA_SUMMARIZE){ double secs = profileInternalGaugeFixOVR.Last(QUDA_PROFILE_COMPUTE); double gflops = (flop * 1e-9) / (secs); double gbytes = byte / (secs * 1e9); #ifdef MULTI_GPU printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops * comm_size(), gbytes * comm_size()); #else printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes); #endif } } template<typename Float, int NElems, typename Gauge> void gaugefixingOVR( Gauge dataOr, cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { if ( gauge_dir != 3 ) { printfQuda("Starting Landau gauge fixing...\n"); gaugefixingOVR<Float, Gauge, NElems, 4>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { printfQuda("Starting Coulomb gauge fixing...\n"); gaugefixingOVR<Float, Gauge, NElems, 3>(dataOr, data, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } } template<typename Float> void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const Float relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { // 
Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12 if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { //printfQuda("QUDA_RECONSTRUCT_NO\n"); numParams = 18; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; gaugefixingOVR<Float, 18>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { //printfQuda("QUDA_RECONSTRUCT_12\n"); numParams = 12; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; gaugefixingOVR<Float, 12>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { //printfQuda("QUDA_RECONSTRUCT_8\n"); numParams = 8; typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; gaugefixingOVR<Float, 8>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** * @brief Gauge fixing with overrelaxation with support for single and multi GPU. * @param[in,out] data, quda gauge field * @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing * @param[in] Nsteps, maximum number of steps to perform gauge fixing * @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this * @param[in] relax_boost, gauge fixing parameter of the overrelaxation method, most common value is 1.5 or 1.7. 
* @param[in] tolerance, torelance value to stop the method, if this value is zero then the method stops when iteration reachs the maximum number of steps defined by Nsteps * @param[in] reunit_interval, reunitarize gauge field when iteration count is a multiple of this * @param[in] stopWtheta, 0 for MILC criterium and 1 to use the theta value */ void gaugefixingOVR( cudaGaugeField& data, const int gauge_dir, const int Nsteps, const int verbose_interval, const double relax_boost, const double tolerance, const int reunit_interval, const int stopWtheta) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_HALF_PRECISION ) { errorQuda("Half precision not supported\n"); } if ( data.Precision() == QUDA_SINGLE_PRECISION ) { gaugefixingOVR<float> (data, gauge_dir, Nsteps, verbose_interval, (float)relax_boost, tolerance, reunit_interval, stopWtheta); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { gaugefixingOVR<double>(data, gauge_dir, Nsteps, verbose_interval, relax_boost, tolerance, reunit_interval, stopWtheta); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Gauge fixing has not been built"); #endif // GPU_GAUGE_ALG } } //namespace quda
ec1e63ed0f64c4b7f5746d43320671a2ab49f1d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ********************************************************************************************************************************************** // KERNELS PRINCIPAUX POUR PROJET HPC // ASTRID LEGAY ET MARCO NAGUIB - MAIN 5 // 15 DECEMBRE 2020 // ********************************************************************************************************************************************** // STRUCTURE UTILE // Pour utiliser la notion de point typedef struct { int x ; int y ; } Point ; // ********************************************************************************************************************************************** // QUESTIONS 1 // ********************************************************************************************************************************************** // MergeSmall_k permet de merger le tableau A et B (dj tris) dans M // On prend en entre le tableau A tri, le tableau B tri, le tableau M pour mettre le rsultat, la taille de A et la taille de B // Paralllisation de l'algorithme B __global__ void MergeSmall_k(TYPE *A, TYPE *B, TYPE *M, int cardA, int cardB) { Point K; Point P; Point Q; int offset ; int i = threadIdx.x ; // Id du thread, permet de savoir quelle valeur va tre rang sa place dfinitive. 
{ if (i > cardA) { K.x = i - cardA ; K.y = cardA ; P.x = cardA ; P.y = i - cardA ; } else { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs (K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; // Q est bien sur une diagonale 45 if (((Q.y >= 0 ) && (Q.x <= cardB)) && ((Q.y == cardA) || (Q.x == 0) || (A[Q.y]>B[Q.x -1]))){ if ((Q.x == cardB) || (Q.y == 0) || (A[Q.y-1]<=B[Q.x])) { if((Q.y < cardA) && ((Q.x == cardB) || (A[Q.y] <= B[Q.x]))) { M[i]= A[Q.y] ; } else { M[i] = B[Q.x] ; } break; // Pour simuler passage au thread suivant } else { K.x = Q.x +1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y +1 ; } } } } // Ajout des lignes 68 82 pour travailler sur la mmoire shared __global__ void MergeSmallShared_k(TYPE *GlobalCudaA, TYPE *GlobalCudaB, TYPE *M, int sizeA, int sizeB) { extern __shared__ TYPE dataAB[] ; // j utilise la mmoire partage entre les threads unsigned int tid = threadIdx.x; // numro du thread dans le block courant unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // numro du thread sur l ensemble des blocks if (tid >= (sizeA + sizeB)) { return ; } // On gre les bordements // Chargement des donnes dans la mmoire partage par le thread ; dataAB[tid] = (i < sizeA)?GlobalCudaA[i]:GlobalCudaB[i-sizeA] ; // voir si on travaille sur le vecteur A ou B // On attend que tous les threads aient faits le travail ( charg la mmoire) __syncthreads(); // // On recadre nos pointeurs pourqu'ils pointent vers la mmoire partage et non la globale TYPE * A = dataAB ; TYPE * B = dataAB + sizeA ; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == 
sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // ********************************************************************************************************************************************** // QUESTION 2 // ********************************************************************************************************************************************** __global__ void PathBig(TYPE * CudaVecteurA, TYPE * CudaVecteurB, int sizeA , int sizeB, int * CudaDiagBx, int * CudaDiagAy, int nbthread, int NbWindows) { // A : an array of size sizeA // B : an array of size sizeB // (CudaDiagBx,CudaDiagAy) recieve the respective coordinates of the "red points" // nbthread : Number of threads, preferably 1024 // NbWindows : Number of windows //Initialisation diagolane CudaDiagBx[0] = CudaDiagAy[0] = 0 ; //(0,0) CudaDiagBx[NbWindows] = sizeB ; CudaDiagAy[NbWindows] = sizeA ; //(sizeA,sizeB) int nth = threadIdx.x; // On explore le nth diagonale Point K, P, Q ; int px , py ; TYPE * A = CudaVecteurA ; TYPE * B = CudaVecteurB ; int offset ; int numDiag = (nth+1) * nbthread -1 ; // Les tableaux vont de 0 N-1 if (numDiag > sizeA) { K.x = numDiag - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = numDiag - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = numDiag ; P.x = numDiag ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { px = Q.x ; py = Q.y ; if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { // v = A[Q.y] ; py ++ ; } else { // v = B[Q.x] ; px ++ ; } // printf("Analyse Diagonale Point de Sortie ref %d - M %" FMT " Q (A Q.y %d) (B Q.x %d) rv.x %d rv.y %d\n",i,v,Q.y,Q.x,rv->x,rv->y) ; CudaDiagBx[nth+1] = px ; CudaDiagAy[nth+1] = py ; break ; // Pour 
simuler passage au thread suivant } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } __global__ void MergeBig_k(TYPE * CudaVecteurA, TYPE * CudaVecteurB, TYPE * CudaVecteurC, int * CudaDiagAy, int * CudaDiagBx , int nbthread) { // int i = threadIdx.x ; // On renge le Ieme element int i = blockIdx.x * blockDim.x + threadIdx.x; // On range le ieme elet int diag = (i / nbthread) ; // Dans quel fentre est-il ? int indC = nbthread * diag ; TYPE *A = CudaVecteurA+CudaDiagAy[diag] ; TYPE *B = CudaVecteurB+CudaDiagBx[diag] ; TYPE *M = CudaVecteurC + indC ; int sizeA = CudaDiagAy[diag+1]-CudaDiagAy[diag] ; int sizeB = CudaDiagBx[diag+1]-CudaDiagBx[diag] ; Point K, P, Q; int offset ; i = i % nbthread ; // On recadre i dans le nouvel espace if (i >= (sizeA + sizeB)) { return ; } // On gre les bordements if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // ********************************************************************************************************************************************** // QUESTION 3 // ********************************************************************************************************************************************** // Nous vous avons expliquer l'algortihme de Merge Sort prcdement, donc nous travaillons par taille : t = t*2 // Tout d'abord, il est important de noter qu'on ne travaille en parallle qu' partir de la taille 4 pour optimiser le code // Taille 1 : tri la 
"main" sur le HOST // Taille 2 : tri de l'algorithme A de l'anonc sur le HOST // A partir de la taille 4 : mise en place de CUDA sur GPU pour parallliser : si size A + sizeB <= 1024 : appelle MergeSmall sinon PathBig et MergeBig // Ensuite nous avons une notion de FLIP/FLOP : mis en place pour viter de nombreuses copies et ainsi gagner du temps // Concernant la notion de FLIP/FLOP, je vais l'expliquer avec un schma avant de monter sur le code void MergeSort(TYPE * M, int sizeM) { //Declarations hipError_t errCuda; TYPE * ptori = NULL ; // pointeur origine TYPE * ptdest = NULL ; // pointeur destination TYPE * pttmp ; TYPE * cudaOri = NULL ; // pointeur orgine dans CUDA TYPE * cudaDest = NULL ; // pointeur dest dans CUDA int t ; int * CudaDiagBx = NULL ; int * CudaDiagAy = NULL ; //Allocation if ((ptdest = (TYPE *) malloc(sizeM * sizeof(TYPE))) == NULL) { printf("PB allocation VecteurM2n") ; exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&cudaOri, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaOri - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&cudaDest, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaDest - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&CudaDiagBx, (1025 + 1) * sizeof(int)))) { printf("PB allocation CudaDiagBx pour - %d - %s \n", errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&CudaDiagAy, (1025 + 1)* sizeof(int)))) { printf("PB allocation CudaDiagAy - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } ptori = M ; // Premire itration on le trie la main pour gagner du temps for (int i = 0 ; i < sizeM ; i += 2 ) { if (ptori[i] > ptori[i+1]) { ptdest[i+1] = ptori[i]; ptdest[i] = ptori[i+1] ; } else { ptdest[i] = ptori[i]; ptdest[i+1] = ptori[i+1]; } } // Flip Flop entre ptori et ptdest 
pttmp = ptdest ; ptdest= ptori ; ptori = pttmp ; t=2; // Seconde itration on le fait en squentiel avec l'algo A du sujet pour gagner du temps for (int i = 0 ; i < sizeM ; i = i+(2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * ptA = ptori + i; TYPE * ptB = ptori + i + sizeA ; TYPE * ptM = ptdest + i ; MergeSimpleHOST(ptA, ptB, ptM, sizeA , sizeB) ; } if (hipSuccess != (errCuda = hipMemcpy(cudaOri, ptdest, sizeM * sizeof(TYPE), hipMemcpyHostToDevice))) { printf("PB Copie Host ptDest -> cudaOri - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Le reste des itrations on utilise mergesmall et mergebig for ( t = 4 ; t < sizeM ; t= t*2) { for ( int i = 0 ; i < sizeM ; i = i + (2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * CudaVecteurA = cudaOri + i ; TYPE * CudaVecteurB = cudaOri + i + sizeA ; if ((sizeA == 0) || (sizeB == 0)) { if (sizeA != 0) { if (hipSuccess != (errCuda = hipMemcpy(cudaDest + i, CudaVecteurA , sizeA * sizeof(TYPE), hipMemcpyDeviceToDevice))) { printf("PB Copie Cuda A -> ptDes rab %d - %d - %s \n",sizeA, errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } } continue ; } //Merge if (sizeA+sizeB <= 1024) { hipLaunchKernelGGL(( MergeSmall_k), dim3(1),dim3(sizeA+sizeB), 0, 0, CudaVecteurA,CudaVecteurB,cudaDest+i,sizeA,sizeB); } else { int nbthread = 1024; int NbDiagonale = (sizeA + sizeB) / nbthread ; if (NbDiagonale > 1024) { printf("Oups, on n'a pas fait le code pour nbDiag %d > 1024\n",NbDiagonale) ; return ; } int NbWindows = NbDiagonale ; NbWindows += (((sizeA + sizeB) % nbthread) == 0)?0:1 ; // si (SizeA + SizeB) % nbthread == 0 alors nbWindows = 0 sinon = 1 hipLaunchKernelGGL(( PathBig), dim3(1),dim3(NbDiagonale), 0, 0, CudaVecteurA, CudaVecteurB, sizeA , sizeB, CudaDiagBx, CudaDiagAy, nbthread,NbWindows) ; int nbBlock = (sizeA+sizeB) / 1024 ; nbBlock += ((sizeA+sizeB) % 1024)?1:0 ; hipLaunchKernelGGL(( MergeBig_k), 
dim3(nbBlock),dim3(1024), 0, 0, CudaVecteurA, CudaVecteurB, cudaDest+i, CudaDiagAy, CudaDiagBx, nbthread) ; } }// End for i // Flip Flop entre les bancs cudaOri et cudaDest TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } // End of loop t if (hipSuccess != hipMemcpy(M, cudaOri, sizeM * sizeof(TYPE), hipMemcpyDeviceToHost)) { printf("PB copie cuda M -> host M \n") ; fflush(stdout); exit(2) ; } // Free if (cudaOri != NULL) { hipFree(cudaOri) ; cudaOri = NULL ; } if (cudaDest != NULL) { hipFree(cudaDest) ; cudaDest = NULL ; } if (CudaDiagAy != NULL) { hipFree(CudaDiagAy) ; CudaDiagAy = NULL ; } if (CudaDiagBx != NULL) { hipFree(CudaDiagBx) ; CudaDiagBx = NULL ; } } // ********************************************************************************************************************************************** // QUESTION 5 // ********************************************************************************************************************************************** __global__ void MergeSmallBatch_k(TYPE *ABAB, int sizeM_tot, TYPE* MM, int d) { int i = threadIdx.x%d; int Qt = (threadIdx.x-i)/d; int gbx = Qt + blockIdx.x*(blockDim.x/d); if (threadIdx.x + blockIdx.x*blockDim.x >= sizeM_tot) return; int t = d/2; int sizeA = t; int sizeB = t; ABAB=ABAB+gbx*d; TYPE* A=ABAB; TYPE* B=A+sizeA; TYPE* M=MM+gbx*d; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // 
********************************************************************************************************************************************** // PARTIE 3 // ********************************************************************************************************************************************** void MergeSort(TYPE * M, int sizeM) { //Declarations hipError_t errCuda; TYPE * cudaOri = NULL ; // pointeur orgine dans CUDA TYPE * cudaDest = NULL ; // pointeur dest dans CUDA int * CudaDiagBx = NULL ; int * CudaDiagAy = NULL ; int t ; //Allocation if (hipSuccess != (errCuda = hipMalloc((void**)&cudaOri, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaVecteurM1 - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (hipSuccess != (errCuda = hipMalloc((void**)&cudaDest, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaVecteurM2 - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&CudaDiagBx, 1026 * sizeof(int)))) { printf("PB allocation CudaDiagBx %d - %d - %s \n", errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (hipSuccess != (errCuda = hipMalloc((void**)&CudaDiagAy, 1026 * sizeof(int)))) { printf("PB allocation CudaDiagAy - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Initialiser cudaOri if (hipSuccess != (errCuda = hipMemcpy(cudaOri, M, sizeM * sizeof(TYPE), hipMemcpyHostToDevice))) { printf("PB Copie Host ptDest -> cudaOri - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Trier cudaOri par blocs de 2, puis par blocs de 4, etc jusqua 512 for ( t = 1 ; t <= 512 and t<sizeM ; t= t*2){ //partie divisible par d int d=t*2; int size_AetB = sizeM%d;//taille restante hipLaunchKernelGGL(( MergeSmallBatch_k), dim3(1024),dim3(1024), 0, 0, cudaOri,sizeM-size_AetB,cudaDest,t*2); //partie restante int sizeA = min(size_AetB,t); int sizeB = size_AetB - 
sizeA; TYPE* cudaM = cudaDest+sizeM-size_AetB; TYPE* cudaA = cudaOri+sizeM-size_AetB; TYPE* cudaB = cudaA+sizeA; hipLaunchKernelGGL(( MergeSmall_k), dim3(1),dim3(sizeA+sizeB), 0, 0, cudaA,cudaB,cudaM,sizeA,sizeB); // Flip Flop entre les bancs cudaDest et cudaOri TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } //t=512 on trie par blocs de taille suprieure laide de PathBig et MergeBig for ( t = t ; t < sizeM ; t= t*2) { for ( int i = 0 ; i < sizeM ; i = i + (2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * CudaVecteurA = cudaOri + i ; TYPE * CudaVecteurB = cudaOri + i + sizeA ; if ((sizeA == 0) || (sizeB == 0)) { if (sizeA != 0) { if (hipSuccess != (errCuda = hipMemcpy(cudaDest + i, CudaVecteurA , sizeA * sizeof(TYPE), hipMemcpyDeviceToDevice))) { printf("PB Copie Cuda A -> ptDes rab %d - %d - %s \n",sizeA, errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } } continue ; } //Merge if (sizeA+sizeB <= 1024) { printf("Oups, on est cens avoir trait ce cas par le merge batch\n") ; return ;} int nbthread = 1024; int NbDiagonale = (sizeA + sizeB) / nbthread ; if (NbDiagonale > 1024) { printf("Oups, on n'a pas fait le code pour nbDiag %d > 1024\n",NbDiagonale) ; return ; } int NbWindows = NbDiagonale ; NbWindows += (((sizeA + sizeB) % nbthread) == 0)?0:1 ; // si (SizeA + SizeB) % nbthread == 0 alors nbWindows = 0 sinon = 1 hipLaunchKernelGGL(( PathBig), dim3(1),dim3(NbDiagonale), 0, 0, CudaVecteurA, CudaVecteurB, sizeA , sizeB, CudaDiagBx, CudaDiagAy, nbthread,NbWindows) ; int nbBlock = (sizeA+sizeB) / 1024 ; nbBlock += ((sizeA+sizeB) % 1024)?1:0 ; hipLaunchKernelGGL(( MergeBig_k), dim3(nbBlock),dim3(1024), 0, 0, CudaVecteurA, CudaVecteurB, cudaDest+i, CudaDiagAy, CudaDiagBx, nbthread) ; } // End for i // Flip Flop entre les bancs cudaDest et cudaOri TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } //cudaOri est entirement tri //remettre dans M if (hipSuccess != hipMemcpy(M, cudaOri, 
sizeM * sizeof(TYPE), hipMemcpyDeviceToHost)) { printf("PB copie cuda M -> host M \n") ; fflush(stdout); exit(2) ; } // Free if (cudaOri != NULL) { hipFree(cudaOri) ; cudaOri = NULL ; } if (cudaDest != NULL) { hipFree(cudaDest) ; cudaDest = NULL ; } if (CudaDiagAy != NULL) { hipFree(CudaDiagAy) ; CudaDiagAy = NULL ; } if (CudaDiagBx != NULL) { hipFree(CudaDiagBx) ; CudaDiagBx = NULL ; } }
ec1e63ed0f64c4b7f5746d43320671a2ab49f1d9.cu
// ********************************************************************************************************************************************** // KERNELS PRINCIPAUX POUR PROJET HPC // ASTRID LEGAY ET MARCO NAGUIB - MAIN 5 // 15 DECEMBRE 2020 // ********************************************************************************************************************************************** // STRUCTURE UTILE // Pour utiliser la notion de point typedef struct { int x ; int y ; } Point ; // ********************************************************************************************************************************************** // QUESTIONS 1 // ********************************************************************************************************************************************** // MergeSmall_k permet de merger le tableau A et B (déjà triés) dans M // On prend en entrée le tableau A trié, le tableau B trié, le tableau M pour mettre le résultat, la taille de A et la taille de B // Parallélisation de l'algorithme B __global__ void MergeSmall_k(TYPE *A, TYPE *B, TYPE *M, int cardA, int cardB) { Point K; Point P; Point Q; int offset ; int i = threadIdx.x ; // Id du thread, permet de savoir quelle valeur va être rangé à sa place définitive. 
{ if (i > cardA) { K.x = i - cardA ; K.y = cardA ; P.x = cardA ; P.y = i - cardA ; } else { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs (K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; // Q est bien sur une diagonale à 45° if (((Q.y >= 0 ) && (Q.x <= cardB)) && ((Q.y == cardA) || (Q.x == 0) || (A[Q.y]>B[Q.x -1]))){ if ((Q.x == cardB) || (Q.y == 0) || (A[Q.y-1]<=B[Q.x])) { if((Q.y < cardA) && ((Q.x == cardB) || (A[Q.y] <= B[Q.x]))) { M[i]= A[Q.y] ; } else { M[i] = B[Q.x] ; } break; // Pour simuler passage au thread suivant } else { K.x = Q.x +1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y +1 ; } } } } // Ajout des lignes 68 à 82 pour travailler sur la mémoire shared __global__ void MergeSmallShared_k(TYPE *GlobalCudaA, TYPE *GlobalCudaB, TYPE *M, int sizeA, int sizeB) { extern __shared__ TYPE dataAB[] ; // j utilise la mémoire partagée entre les threads unsigned int tid = threadIdx.x; // numéro du thread dans le block courant unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // numéro du thread sur l ensemble des blocks if (tid >= (sizeA + sizeB)) { return ; } // On gère les ébordements // Chargement des données dans la mémoire partagée par le thread ; dataAB[tid] = (i < sizeA)?GlobalCudaA[i]:GlobalCudaB[i-sizeA] ; // voir si on travaille sur le vecteur A ou B // On attend que tous les threads aient faits le travail ( chargé la mémoire) __syncthreads(); // // On recadre nos pointeurs pourqu'ils pointent vers la mémoire partagée et non la globale TYPE * A = dataAB ; TYPE * B = dataAB + sizeA ; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < 
sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // ********************************************************************************************************************************************** // QUESTION 2 // ********************************************************************************************************************************************** __global__ void PathBig(TYPE * CudaVecteurA, TYPE * CudaVecteurB, int sizeA , int sizeB, int * CudaDiagBx, int * CudaDiagAy, int nbthread, int NbWindows) { // A : an array of size sizeA // B : an array of size sizeB // (CudaDiagBx,CudaDiagAy) recieve the respective coordinates of the "red points" // nbthread : Number of threads, preferably 1024 // NbWindows : Number of windows //Initialisation diagolane CudaDiagBx[0] = CudaDiagAy[0] = 0 ; //(0,0) CudaDiagBx[NbWindows] = sizeB ; CudaDiagAy[NbWindows] = sizeA ; //(sizeA,sizeB) int nth = threadIdx.x; // On explore le nth diagonale Point K, P, Q ; int px , py ; TYPE * A = CudaVecteurA ; TYPE * B = CudaVecteurB ; int offset ; int numDiag = (nth+1) * nbthread -1 ; // Les tableaux vont de 0 à N-1 if (numDiag > sizeA) { K.x = numDiag - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = numDiag - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = numDiag ; P.x = numDiag ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { px = Q.x ; py = Q.y ; if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { // v = A[Q.y] ; py ++ ; } else { // v = B[Q.x] ; px ++ ; } // printf("Analyse Diagonale Point de Sortie ref %d - M %" FMT " Q (A Q.y %d) (B Q.x %d) rv.x %d rv.y %d\n",i,v,Q.y,Q.x,rv->x,rv->y) ; CudaDiagBx[nth+1] = px ; CudaDiagAy[nth+1] = py ; 
break ; // Pour simuler passage au thread suivant } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } __global__ void MergeBig_k(TYPE * CudaVecteurA, TYPE * CudaVecteurB, TYPE * CudaVecteurC, int * CudaDiagAy, int * CudaDiagBx , int nbthread) { // int i = threadIdx.x ; // On renge le Ieme element int i = blockIdx.x * blockDim.x + threadIdx.x; // On range le ieme elet int diag = (i / nbthread) ; // Dans quel fenêtre est-il ? int indC = nbthread * diag ; TYPE *A = CudaVecteurA+CudaDiagAy[diag] ; TYPE *B = CudaVecteurB+CudaDiagBx[diag] ; TYPE *M = CudaVecteurC + indC ; int sizeA = CudaDiagAy[diag+1]-CudaDiagAy[diag] ; int sizeB = CudaDiagBx[diag+1]-CudaDiagBx[diag] ; Point K, P, Q; int offset ; i = i % nbthread ; // On recadre i dans le nouvel espace if (i >= (sizeA + sizeB)) { return ; } // On gère les ébordements if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // ********************************************************************************************************************************************** // QUESTION 3 // ********************************************************************************************************************************************** // Nous vous avons expliquer l'algortihme de Merge Sort précédement, donc nous travaillons par taille : t = t*2 // Tout d'abord, il est important de noter qu'on ne travaille en parallèle qu'à partir de la taille 4 pour optimiser le code 
// Taille 1 : tri à la "main" sur le HOST // Taille 2 : tri de l'algorithme A de l'anoncé sur le HOST // A partir de la taille 4 : mise en place de CUDA sur GPU pour paralléliser : si size A + sizeB <= 1024 : appelle MergeSmall sinon PathBig et MergeBig // Ensuite nous avons une notion de FLIP/FLOP : mis en place pour éviter de nombreuses copies et ainsi gagner du temps // Concernant la notion de FLIP/FLOP, je vais l'expliquer avec un schéma avant de monter sur le code void MergeSort(TYPE * M, int sizeM) { //Declarations cudaError_t errCuda; TYPE * ptori = NULL ; // pointeur origine TYPE * ptdest = NULL ; // pointeur destination TYPE * pttmp ; TYPE * cudaOri = NULL ; // pointeur orgine dans CUDA TYPE * cudaDest = NULL ; // pointeur dest dans CUDA int t ; int * CudaDiagBx = NULL ; int * CudaDiagAy = NULL ; //Allocation if ((ptdest = (TYPE *) malloc(sizeM * sizeof(TYPE))) == NULL) { printf("PB allocation VecteurM2n") ; exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaOri, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaOri - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaDest, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaDest - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&CudaDiagBx, (1025 + 1) * sizeof(int)))) { printf("PB allocation CudaDiagBx pour - %d - %s \n", errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&CudaDiagAy, (1025 + 1)* sizeof(int)))) { printf("PB allocation CudaDiagAy - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } ptori = M ; // Première itération on le trie à la main pour gagner du temps for (int i = 0 ; i < sizeM ; i += 2 ) { if (ptori[i] > ptori[i+1]) { ptdest[i+1] = ptori[i]; ptdest[i] = ptori[i+1] ; } else { ptdest[i] = ptori[i]; ptdest[i+1] = 
ptori[i+1]; } } // Flip Flop entre ptori et ptdest pttmp = ptdest ; ptdest= ptori ; ptori = pttmp ; t=2; // Seconde itération on le fait en séquentiel avec l'algo A du sujet pour gagner du temps for (int i = 0 ; i < sizeM ; i = i+(2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * ptA = ptori + i; TYPE * ptB = ptori + i + sizeA ; TYPE * ptM = ptdest + i ; MergeSimpleHOST(ptA, ptB, ptM, sizeA , sizeB) ; } if (cudaSuccess != (errCuda = cudaMemcpy(cudaOri, ptdest, sizeM * sizeof(TYPE), cudaMemcpyHostToDevice))) { printf("PB Copie Host ptDest -> cudaOri - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Le reste des itérations on utilise mergesmall et mergebig for ( t = 4 ; t < sizeM ; t= t*2) { for ( int i = 0 ; i < sizeM ; i = i + (2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * CudaVecteurA = cudaOri + i ; TYPE * CudaVecteurB = cudaOri + i + sizeA ; if ((sizeA == 0) || (sizeB == 0)) { if (sizeA != 0) { if (cudaSuccess != (errCuda = cudaMemcpy(cudaDest + i, CudaVecteurA , sizeA * sizeof(TYPE), cudaMemcpyDeviceToDevice))) { printf("PB Copie Cuda A -> ptDes rab %d - %d - %s \n",sizeA, errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } } continue ; } //Merge if (sizeA+sizeB <= 1024) { MergeSmall_k<<<1,sizeA+sizeB>>> (CudaVecteurA,CudaVecteurB,cudaDest+i,sizeA,sizeB); } else { int nbthread = 1024; int NbDiagonale = (sizeA + sizeB) / nbthread ; if (NbDiagonale > 1024) { printf("Oups, on n'a pas fait le code pour nbDiag %d > 1024\n",NbDiagonale) ; return ; } int NbWindows = NbDiagonale ; NbWindows += (((sizeA + sizeB) % nbthread) == 0)?0:1 ; // si (SizeA + SizeB) % nbthread == 0 alors nbWindows = 0 sinon = 1 PathBig<<<1,NbDiagonale>>>(CudaVecteurA, CudaVecteurB, sizeA , sizeB, CudaDiagBx, CudaDiagAy, nbthread,NbWindows) ; int nbBlock = (sizeA+sizeB) / 1024 ; nbBlock += ((sizeA+sizeB) % 1024)?1:0 ; MergeBig_k<<<nbBlock,1024>>> (CudaVecteurA, CudaVecteurB, 
cudaDest+i, CudaDiagAy, CudaDiagBx, nbthread) ; } }// End for i // Flip Flop entre les bancs cudaOri et cudaDest TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } // End of loop t if (cudaSuccess != cudaMemcpy(M, cudaOri, sizeM * sizeof(TYPE), cudaMemcpyDeviceToHost)) { printf("PB copie cuda M -> host M \n") ; fflush(stdout); exit(2) ; } // Free if (cudaOri != NULL) { cudaFree(cudaOri) ; cudaOri = NULL ; } if (cudaDest != NULL) { cudaFree(cudaDest) ; cudaDest = NULL ; } if (CudaDiagAy != NULL) { cudaFree(CudaDiagAy) ; CudaDiagAy = NULL ; } if (CudaDiagBx != NULL) { cudaFree(CudaDiagBx) ; CudaDiagBx = NULL ; } } // ********************************************************************************************************************************************** // QUESTION 5 // ********************************************************************************************************************************************** __global__ void MergeSmallBatch_k(TYPE *ABAB, int sizeM_tot, TYPE* MM, int d) { int i = threadIdx.x%d; int Qt = (threadIdx.x-i)/d; int gbx = Qt + blockIdx.x*(blockDim.x/d); if (threadIdx.x + blockIdx.x*blockDim.x >= sizeM_tot) return; int t = d/2; int sizeA = t; int sizeB = t; ABAB=ABAB+gbx*d; TYPE* A=ABAB; TYPE* B=A+sizeA; TYPE* M=MM+gbx*d; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { M[i] = A[Q.y] ; } else { M[i] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } // 
********************************************************************************************************************************************** // PARTIE 3 // ********************************************************************************************************************************************** void MergeSort(TYPE * M, int sizeM) { //Declarations cudaError_t errCuda; TYPE * cudaOri = NULL ; // pointeur orgine dans CUDA TYPE * cudaDest = NULL ; // pointeur dest dans CUDA int * CudaDiagBx = NULL ; int * CudaDiagAy = NULL ; int t ; //Allocation if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaOri, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaVecteurM1 - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaDest, sizeM * sizeof(TYPE)))) { printf("PB allocation CudaVecteurM2 - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&CudaDiagBx, 1026 * sizeof(int)))) { printf("PB allocation CudaDiagBx %d - %d - %s \n", errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } if (cudaSuccess != (errCuda = cudaMalloc((void**)&CudaDiagAy, 1026 * sizeof(int)))) { printf("PB allocation CudaDiagAy - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Initialiser cudaOri if (cudaSuccess != (errCuda = cudaMemcpy(cudaOri, M, sizeM * sizeof(TYPE), cudaMemcpyHostToDevice))) { printf("PB Copie Host ptDest -> cudaOri - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } //Trier cudaOri par blocs de 2, puis par blocs de 4, etc jusqua 512 for ( t = 1 ; t <= 512 and t<sizeM ; t= t*2){ //partie divisible par d int d=t*2; int size_AetB = sizeM%d;//taille restante MergeSmallBatch_k<<<1024,1024>>>(cudaOri,sizeM-size_AetB,cudaDest,t*2); //partie restante int sizeA = min(size_AetB,t); int sizeB = size_AetB - sizeA; TYPE* cudaM = 
cudaDest+sizeM-size_AetB; TYPE* cudaA = cudaOri+sizeM-size_AetB; TYPE* cudaB = cudaA+sizeA; MergeSmall_k<<<1,sizeA+sizeB>>> (cudaA,cudaB,cudaM,sizeA,sizeB); // Flip Flop entre les bancs cudaDest et cudaOri TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } //t=512 on trie par blocs de taille supérieure à laide de PathBig et MergeBig for ( t = t ; t < sizeM ; t= t*2) { for ( int i = 0 ; i < sizeM ; i = i + (2*t)) { int sizeA = min(t,sizeM-i); int sizeB = min(t,max(sizeM-(i+t),0)); TYPE * CudaVecteurA = cudaOri + i ; TYPE * CudaVecteurB = cudaOri + i + sizeA ; if ((sizeA == 0) || (sizeB == 0)) { if (sizeA != 0) { if (cudaSuccess != (errCuda = cudaMemcpy(cudaDest + i, CudaVecteurA , sizeA * sizeof(TYPE), cudaMemcpyDeviceToDevice))) { printf("PB Copie Cuda A -> ptDes rab %d - %d - %s \n",sizeA, errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } } continue ; } //Merge if (sizeA+sizeB <= 1024) { printf("Oups, on est censé avoir traité ce cas par le merge batch\n") ; return ;} int nbthread = 1024; int NbDiagonale = (sizeA + sizeB) / nbthread ; if (NbDiagonale > 1024) { printf("Oups, on n'a pas fait le code pour nbDiag %d > 1024\n",NbDiagonale) ; return ; } int NbWindows = NbDiagonale ; NbWindows += (((sizeA + sizeB) % nbthread) == 0)?0:1 ; // si (SizeA + SizeB) % nbthread == 0 alors nbWindows = 0 sinon = 1 PathBig<<<1,NbDiagonale>>>(CudaVecteurA, CudaVecteurB, sizeA , sizeB, CudaDiagBx, CudaDiagAy, nbthread,NbWindows) ; int nbBlock = (sizeA+sizeB) / 1024 ; nbBlock += ((sizeA+sizeB) % 1024)?1:0 ; MergeBig_k<<<nbBlock,1024>>> (CudaVecteurA, CudaVecteurB, cudaDest+i, CudaDiagAy, CudaDiagBx, nbthread) ; } // End for i // Flip Flop entre les bancs cudaDest et cudaOri TYPE * cudaTmp = cudaDest ; cudaDest = cudaOri ; cudaOri = cudaTmp ; } //cudaOri est entièrement trié //remettre dans M if (cudaSuccess != cudaMemcpy(M, cudaOri, sizeM * sizeof(TYPE), cudaMemcpyDeviceToHost)) { printf("PB copie cuda M -> host M \n") ; fflush(stdout); exit(2) ; } 
// Free if (cudaOri != NULL) { cudaFree(cudaOri) ; cudaOri = NULL ; } if (cudaDest != NULL) { cudaFree(cudaDest) ; cudaDest = NULL ; } if (CudaDiagAy != NULL) { cudaFree(CudaDiagAy) ; CudaDiagAy = NULL ; } if (CudaDiagBx != NULL) { cudaFree(CudaDiagBx) ; CudaDiagBx = NULL ; } }
64aa23164d2136464f4d77864f8c33e86db25b84.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "python/tools/kernel_explorer/kernels/rocm/gemm_softmax_gemm_permute.h" #include "pybind11/stl.h" #include "contrib_ops/rocm/bert/batched_gemm_softmax_gemm_permute_pipelines.cuh" #include "core/providers/rocm/tunable/rocm_tunable.h" #include "python/tools/kernel_explorer/device_array.h" #include "python/tools/kernel_explorer/kernel_explorer_interface.h" #include <vector> namespace py = pybind11; using namespace onnxruntime::contrib::rocm; namespace onnxruntime { template <typename T> class IGemmSoftmaxGemmPermuteKernelExplorer : public IKernelExplorer { public: IGemmSoftmaxGemmPermuteKernelExplorer( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) { ROCBLAS_CALL_THROW(rocblas_create_handle(&rocblas_handle_)); attn_.batch_size = batch; attn_.sequence_length = seqlen; attn_.kv_sequence_length = seqlen; // NOTE: not used attn_.past_sequence_length = 0; attn_.original_past_sequence_length = 0; // NOTE: not used attn_.total_sequence_length = total_seqlen; attn_.max_sequence_length = 0; attn_.hidden_size = num_heads * head_size; attn_.head_size = head_size; attn_.v_hidden_size = attn_.hidden_size; // Q,K,V hidden size must agree now attn_.v_head_size = attn_.head_size; // Q,K,V hidden size must agree now attn_.num_heads = num_heads; attn_.is_unidirectional = false; attn_.past_present_share_buffer = false; attn_.do_rotary = false; attn_.mask_filter_value = -10000.0f; attn_.scale = scale; if (mask_dim == 0) { attn_.mask_type = contrib::MASK_NONE; } else if (mask_dim == 2) { attn_.mask_type = contrib::MASK_2D_KEY_PADDING; } else if (mask_dim == 3) { attn_.mask_type 
= contrib::MASK_3D_ATTENTION; } else if (mask_dim == 4) { attn_.mask_type = contrib::MASK_4D_MEGATRON; } else { ORT_ENFORCE(false, "mask type not supported"); } device_prop = GetEp()->GetDeviceProp(); params_.tuning_ctx = TuningContext(); params_.stream = Stream(); params_.handle = rocblas_handle_; params_.attention = &attn_; params_.device_prop = &device_prop; params_.scale = scale; params_.q_buffer = reinterpret_cast<T*>(Q.ptr()); params_.k_buffer = reinterpret_cast<T*>(K.ptr()); params_.v_buffer = reinterpret_cast<T*>(V.ptr()); if (attn_bias.has_value()) { params_.bias_buffer = reinterpret_cast<T*>(attn_bias->ptr()); } if (attn_mask.has_value()) { params_.mask_index_buffer = reinterpret_cast<int*>(attn_mask->ptr()); if (mask_dim == 2) { params_.mask_index_dims = {batch, total_seqlen}; } else if (mask_dim == 3) { params_.mask_index_dims = {batch, seqlen, total_seqlen}; } else if (mask_dim == 4) { ORT_ENFORCE(max_seqlen.has_value()); attn_.max_sequence_length = max_seqlen.value(); ORT_ENFORCE(attn_.max_sequence_length >= seqlen); attn_.past_sequence_length = attn_.max_sequence_length - seqlen; params_.mask_index_dims = {batch, 1, attn_.max_sequence_length, attn_.max_sequence_length}; } } params_.out_buffer = reinterpret_cast<T*>(out.ptr()); } ~IGemmSoftmaxGemmPermuteKernelExplorer() { ROCBLAS_CALL_THROW(rocblas_destroy_handle(rocblas_handle_)); } void SetWorkspace(size_t num_bytes) { void* ptr; HIP_CALL_THROW(hipMalloc(&ptr, num_bytes)); workspace_.reset(ptr, [](void* ptr) { HIP_CALL_THROW(hipFree(ptr)); }); params_.workspace_buffer = reinterpret_cast<T*>(workspace_.get()); } protected: using ParamsT = contrib::rocm::GemmSoftmaxGemmPermuteParams<T>; rocblas_handle rocblas_handle_; hipDeviceProp_t device_prop; contrib::AttentionParameters attn_; ParamsT params_; std::shared_ptr<void> workspace_; }; // The pipeline composed from rocblas api calls and kernel launches. 
template <typename T> class GemmSoftmaxGemmPermuteGeneric : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteGeneric( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_)); } std::vector<std::string> ListOps() const { return {"Generic"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteGenericPipeline<T>::Run( &this->params_, /*use_persistent_softmax=*/false)); } }; #ifdef USE_COMPOSABLE_KERNEL template <typename T, bool USE_BIAS, bool USE_MASK> class GemmSoftmaxGemmPermuteCK : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteCK( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_)); for (auto&& [ts, op] : GetCKGemmSoftmaxGemmPermuteTypeStringAndOps<T, USE_BIAS, USE_MASK>()) { type_strings_.emplace_back(std::move(ts)); ops_.emplace_back(std::move(op)); } } std::vector<std::string> ListOps() const { return type_strings_; } bool SelectOp(const 
std::string& name) { for (size_t i = 0; i < ops_.size(); i++) { if (type_strings_[i] == name) { selected_op_ = i; Status status = ops_[i].IsSupported(&this->params_); return status.IsOK(); } } ORT_THROW("Cannot find implementation ", name); } void Run() override { ORT_THROW_IF_ERROR(ops_[selected_op_](&this->params_)); } private: using ParamsT = typename IGemmSoftmaxGemmPermuteKernelExplorer<T>::ParamsT; using OpT = Op<ParamsT>; std::vector<OpT> ops_; std::vector<std::string> type_strings_; size_t selected_op_{}; }; #endif // USE_COMPOSABLE_KERNEL // The pipeline composed from rocblas api calls and kernel launches. template <typename T> class GemmSoftmaxGemmPermuteTunable : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteTunable( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(::max( GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_), GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_))); this->params_.TuningContext()->EnableTunableOp(); } std::vector<std::string> ListOps() const { return {"Tunable"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteTunableOp<T>{}(&this->params_)); } }; #define REGISTER_COMMON(name, type, ...) 
\ py::class_<type<__VA_ARGS__>>(m, name) \ .def(py::init<int64_t, int64_t, int64_t, std::optional<int64_t>, int64_t, int64_t, int64_t, \ float, \ DeviceArray&, \ DeviceArray&, \ DeviceArray&, \ std::optional<DeviceArray>&, \ std::optional<DeviceArray>&, \ DeviceArray&>()) \ .def("SetRepeats", &type<__VA_ARGS__>::SetRepeats) \ .def("Run", &type<__VA_ARGS__>::Run) \ .def("Profile", &type<__VA_ARGS__>::Profile) \ .def("ListOps", &type<__VA_ARGS__>::ListOps) \ .def("SelectOp", &type<__VA_ARGS__>::SelectOp); #define REGISTER_GENERIC(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteGeneric_" #dtype, GemmSoftmaxGemmPermuteGeneric, dtype) #define REGISTER_CK(dtype, biased, masked, mask_bias_suffix) \ REGISTER_COMMON( \ "GemmSoftmaxGemmPermuteCK" mask_bias_suffix "_" #dtype, GemmSoftmaxGemmPermuteCK, dtype, biased, masked) #define REGISTER_TUNABLE(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteTunable_" #dtype, GemmSoftmaxGemmPermuteTunable, dtype) void InitGemmSoftmaxGemmPermute(py::module m) { REGISTER_GENERIC(half); #ifdef USE_COMPOSABLE_KERNEL REGISTER_CK(half, false, false, ""); REGISTER_CK(half, true, false, "Biased"); REGISTER_CK(half, false, true, "Masked"); REGISTER_CK(half, true, true, "BiasedMasked"); #endif REGISTER_TUNABLE(half); } } // namespace onnxruntime
64aa23164d2136464f4d77864f8c33e86db25b84.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "python/tools/kernel_explorer/kernels/rocm/gemm_softmax_gemm_permute.h" #include "pybind11/stl.h" #include "contrib_ops/rocm/bert/batched_gemm_softmax_gemm_permute_pipelines.cuh" #include "core/providers/rocm/tunable/rocm_tunable.h" #include "python/tools/kernel_explorer/device_array.h" #include "python/tools/kernel_explorer/kernel_explorer_interface.h" #include <vector> namespace py = pybind11; using namespace onnxruntime::contrib::rocm; namespace onnxruntime { template <typename T> class IGemmSoftmaxGemmPermuteKernelExplorer : public IKernelExplorer { public: IGemmSoftmaxGemmPermuteKernelExplorer( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) { ROCBLAS_CALL_THROW(rocblas_create_handle(&rocblas_handle_)); attn_.batch_size = batch; attn_.sequence_length = seqlen; attn_.kv_sequence_length = seqlen; // NOTE: not used attn_.past_sequence_length = 0; attn_.original_past_sequence_length = 0; // NOTE: not used attn_.total_sequence_length = total_seqlen; attn_.max_sequence_length = 0; attn_.hidden_size = num_heads * head_size; attn_.head_size = head_size; attn_.v_hidden_size = attn_.hidden_size; // Q,K,V hidden size must agree now attn_.v_head_size = attn_.head_size; // Q,K,V hidden size must agree now attn_.num_heads = num_heads; attn_.is_unidirectional = false; attn_.past_present_share_buffer = false; attn_.do_rotary = false; attn_.mask_filter_value = -10000.0f; attn_.scale = scale; if (mask_dim == 0) { attn_.mask_type = contrib::MASK_NONE; } else if (mask_dim == 2) { attn_.mask_type = contrib::MASK_2D_KEY_PADDING; } else if (mask_dim == 3) { attn_.mask_type = contrib::MASK_3D_ATTENTION; } else if (mask_dim == 4) { 
attn_.mask_type = contrib::MASK_4D_MEGATRON; } else { ORT_ENFORCE(false, "mask type not supported"); } device_prop = GetEp()->GetDeviceProp(); params_.tuning_ctx = TuningContext(); params_.stream = Stream(); params_.handle = rocblas_handle_; params_.attention = &attn_; params_.device_prop = &device_prop; params_.scale = scale; params_.q_buffer = reinterpret_cast<T*>(Q.ptr()); params_.k_buffer = reinterpret_cast<T*>(K.ptr()); params_.v_buffer = reinterpret_cast<T*>(V.ptr()); if (attn_bias.has_value()) { params_.bias_buffer = reinterpret_cast<T*>(attn_bias->ptr()); } if (attn_mask.has_value()) { params_.mask_index_buffer = reinterpret_cast<int*>(attn_mask->ptr()); if (mask_dim == 2) { params_.mask_index_dims = {batch, total_seqlen}; } else if (mask_dim == 3) { params_.mask_index_dims = {batch, seqlen, total_seqlen}; } else if (mask_dim == 4) { ORT_ENFORCE(max_seqlen.has_value()); attn_.max_sequence_length = max_seqlen.value(); ORT_ENFORCE(attn_.max_sequence_length >= seqlen); attn_.past_sequence_length = attn_.max_sequence_length - seqlen; params_.mask_index_dims = {batch, 1, attn_.max_sequence_length, attn_.max_sequence_length}; } } params_.out_buffer = reinterpret_cast<T*>(out.ptr()); } ~IGemmSoftmaxGemmPermuteKernelExplorer() { ROCBLAS_CALL_THROW(rocblas_destroy_handle(rocblas_handle_)); } void SetWorkspace(size_t num_bytes) { void* ptr; HIP_CALL_THROW(hipMalloc(&ptr, num_bytes)); workspace_.reset(ptr, [](void* ptr) { HIP_CALL_THROW(hipFree(ptr)); }); params_.workspace_buffer = reinterpret_cast<T*>(workspace_.get()); } protected: using ParamsT = contrib::rocm::GemmSoftmaxGemmPermuteParams<T>; rocblas_handle rocblas_handle_; hipDeviceProp_t device_prop; contrib::AttentionParameters attn_; ParamsT params_; std::shared_ptr<void> workspace_; }; // The pipeline composed from rocblas api calls and kernel launches. 
template <typename T> class GemmSoftmaxGemmPermuteGeneric : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteGeneric( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_)); } std::vector<std::string> ListOps() const { return {"Generic"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteGenericPipeline<T>::Run( &this->params_, /*use_persistent_softmax=*/false)); } }; #ifdef USE_COMPOSABLE_KERNEL template <typename T, bool USE_BIAS, bool USE_MASK> class GemmSoftmaxGemmPermuteCK : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteCK( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_)); for (auto&& [ts, op] : GetCKGemmSoftmaxGemmPermuteTypeStringAndOps<T, USE_BIAS, USE_MASK>()) { type_strings_.emplace_back(std::move(ts)); ops_.emplace_back(std::move(op)); } } std::vector<std::string> ListOps() const { return type_strings_; } bool SelectOp(const 
std::string& name) { for (size_t i = 0; i < ops_.size(); i++) { if (type_strings_[i] == name) { selected_op_ = i; Status status = ops_[i].IsSupported(&this->params_); return status.IsOK(); } } ORT_THROW("Cannot find implementation ", name); } void Run() override { ORT_THROW_IF_ERROR(ops_[selected_op_](&this->params_)); } private: using ParamsT = typename IGemmSoftmaxGemmPermuteKernelExplorer<T>::ParamsT; using OpT = Op<ParamsT>; std::vector<OpT> ops_; std::vector<std::string> type_strings_; size_t selected_op_{}; }; #endif // USE_COMPOSABLE_KERNEL // The pipeline composed from rocblas api calls and kernel launches. template <typename T> class GemmSoftmaxGemmPermuteTunable : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteTunable( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(std::max( GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_), GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_))); this->params_.TuningContext()->EnableTunableOp(); } std::vector<std::string> ListOps() const { return {"Tunable"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteTunableOp<T>{}(&this->params_)); } }; #define REGISTER_COMMON(name, type, ...) 
\ py::class_<type<__VA_ARGS__>>(m, name) \ .def(py::init<int64_t, int64_t, int64_t, std::optional<int64_t>, int64_t, int64_t, int64_t, \ float, \ DeviceArray&, \ DeviceArray&, \ DeviceArray&, \ std::optional<DeviceArray>&, \ std::optional<DeviceArray>&, \ DeviceArray&>()) \ .def("SetRepeats", &type<__VA_ARGS__>::SetRepeats) \ .def("Run", &type<__VA_ARGS__>::Run) \ .def("Profile", &type<__VA_ARGS__>::Profile) \ .def("ListOps", &type<__VA_ARGS__>::ListOps) \ .def("SelectOp", &type<__VA_ARGS__>::SelectOp); #define REGISTER_GENERIC(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteGeneric_" #dtype, GemmSoftmaxGemmPermuteGeneric, dtype) #define REGISTER_CK(dtype, biased, masked, mask_bias_suffix) \ REGISTER_COMMON( \ "GemmSoftmaxGemmPermuteCK" mask_bias_suffix "_" #dtype, GemmSoftmaxGemmPermuteCK, dtype, biased, masked) #define REGISTER_TUNABLE(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteTunable_" #dtype, GemmSoftmaxGemmPermuteTunable, dtype) void InitGemmSoftmaxGemmPermute(py::module m) { REGISTER_GENERIC(half); #ifdef USE_COMPOSABLE_KERNEL REGISTER_CK(half, false, false, ""); REGISTER_CK(half, true, false, "Biased"); REGISTER_CK(half, false, true, "Masked"); REGISTER_CK(half, true, true, "BiasedMasked"); #endif REGISTER_TUNABLE(half); } } // namespace onnxruntime
87825210ed220d5277da65b1e6c07230f70b199c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include "lite/core/op_registry.h" #include "lite/kernels/cuda/search_fc_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> static void anakin_NV_gemv(hipblasHandle_t handle, const bool TransA, const int M, const int N, const T alpha, const T* A, const T* x, const T beta, T* y); template <> void anakin_NV_gemv<float>(hipblasHandle_t handle, const bool TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == false) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK( hipblasSgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <typename T> static void anakin_NV_gemm(hipblasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const T alpha, const T* A, const T* B, const T beta, T* C); template <> void anakin_NV_gemm<float>(hipblasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (!TransA /* == CblasNoTrans*/) ? K : M; int ldb = (!TransB /* == CblasNoTrans*/) ? 
N : K; hipblasOperation_t cuTransA = (!TransA /* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (!TransB /* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void anakin_NV_gemm<char>(hipblasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const char alpha, const char* A, const char* B, const char beta, char* C) { LOG(FATAL) << "int8 gemm is not implemented"; } template <typename T> static __global__ void add_bias(int n, int output_size, const T* bias, T* dout) { int index = blockIdx.x * blockDim.x + threadIdx.x; int bias_index = index % output_size; if (index < n) { dout[index] = dout[index] + bias[bias_index]; } } template <typename T> void SearchFcCompute<T>::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); const Tensor* x_tensor = param.X; param.Out->Resize({x_tensor->dims()[0], param.out_size}); _M = x_tensor->dims().count(0, 1); _K = x_tensor->dims().count(1, x_tensor->numel()); _N = param.out_size; const T* din = x_tensor->data<T>(); Tensor* out_tensor = param.Out; T* dout = out_tensor->mutable_data<T>(TARGET(kCUDA)); const Tensor* w_tensor = param.W; const T* weight = w_tensor->data<T>(); const Tensor* b_tensor = param.b; const T* bias = b_tensor->data<T>(); hipblasCreate(&_handle); if (_M == 1 && _K > 50000) { anakin_NV_gemv<T>(_handle, false, _N, _K, (T)1, weight, din, (T)0, dout); } else { anakin_NV_gemm<T>(_handle, false, !_flag_trans_weights, _M, _N, _K, (T)1, din, weight, (T)0, dout); } int total_size = _M * _N; hipLaunchKernelGGL(( add_bias<T>), dim3(CUDA_GET_BLOCKS(total_size)), dim3(CUDA_NUM_THREADS), 0, stream, total_size, _N, bias, dout); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(search_fc, kCUDA, kFloat, 
kNCHW, paddle::lite::kernels::cuda::SearchFcCompute<float>, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("b", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
87825210ed220d5277da65b1e6c07230f70b199c.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include "lite/core/op_registry.h" #include "lite/kernels/cuda/search_fc_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> static void anakin_NV_gemv(cublasHandle_t handle, const bool TransA, const int M, const int N, const T alpha, const T* A, const T* x, const T beta, T* y); template <> void anakin_NV_gemv<float>(cublasHandle_t handle, const bool TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == false) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK( cublasSgemv(handle, cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <typename T> static void anakin_NV_gemm(cublasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const T alpha, const T* A, const T* B, const T beta, T* C); template <> void anakin_NV_gemm<float>(cublasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (!TransA /* == CblasNoTrans*/) ? K : M; int ldb = (!TransB /* == CblasNoTrans*/) ? N : K; cublasOperation_t cuTransA = (!TransA /* == CblasNoTrans*/) ? 
CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (!TransB /* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void anakin_NV_gemm<char>(cublasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const char alpha, const char* A, const char* B, const char beta, char* C) { LOG(FATAL) << "int8 gemm is not implemented"; } template <typename T> static __global__ void add_bias(int n, int output_size, const T* bias, T* dout) { int index = blockIdx.x * blockDim.x + threadIdx.x; int bias_index = index % output_size; if (index < n) { dout[index] = dout[index] + bias[bias_index]; } } template <typename T> void SearchFcCompute<T>::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); const Tensor* x_tensor = param.X; param.Out->Resize({x_tensor->dims()[0], param.out_size}); _M = x_tensor->dims().count(0, 1); _K = x_tensor->dims().count(1, x_tensor->numel()); _N = param.out_size; const T* din = x_tensor->data<T>(); Tensor* out_tensor = param.Out; T* dout = out_tensor->mutable_data<T>(TARGET(kCUDA)); const Tensor* w_tensor = param.W; const T* weight = w_tensor->data<T>(); const Tensor* b_tensor = param.b; const T* bias = b_tensor->data<T>(); cublasCreate(&_handle); if (_M == 1 && _K > 50000) { anakin_NV_gemv<T>(_handle, false, _N, _K, (T)1, weight, din, (T)0, dout); } else { anakin_NV_gemm<T>(_handle, false, !_flag_trans_weights, _M, _N, _K, (T)1, din, weight, (T)0, dout); } int total_size = _M * _N; add_bias<T><<<CUDA_GET_BLOCKS(total_size), CUDA_NUM_THREADS, 0, stream>>>( total_size, _N, bias, dout); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(search_fc, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::SearchFcCompute<float>, def) .BindInput("X", 
{LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindInput("b", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize();
24ae3c5b19b84eba33cb9990b261a50259afb869.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef ENABLE_MPI #include <infiniband/verbs.h> #include <boost/preprocessor.hpp> #include <collectives/ib_comm.hpp> #include <iostream> #include <sstream> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { #define MAX_AR_CHANNELS 31 IbComm::ARCollContext::ARCollContext(IbComm* comm) { size_t num_gpus = comm->num_gpus_; num_gpus_ = num_gpus; std::generate_n(std::back_inserter(ctx_), num_gpus, [] { return std::make_unique<ARCollContextPerGPU>(); }); // Read config params from env if (getenv("ONESHOT_NBLOCKS")) { cfg_nblocks_ = atoi(getenv("ONESHOT_NBLOCKS")); } if (getenv("ONESHOT_ALIGN_BLOCK")) { cfg_align_block_ = atoi(getenv("ONESHOT_ALIGN_BLOCK")); } if (getenv("ONESHOT_MIN_BLOCK")) { cfg_min_block_ = atoi(getenv("ONESHOT_MIN_BLOCK")); } if (getenv("ONESHOT_NCHANNELS")) { cfg_nchannels_ = atoi(getenv("ONESHOT_NCHANNELS")); } PROXY_ASSERT_MSG(cfg_nchannels_ <= MAX_AR_CHANNELS, "Max oneshot channels is 31"); PROXY_ASSERT(cfg_nblocks_ <= AR_MAX_BLOCKS); MESSAGE_("using oneshot nblocks: " + std::to_string(cfg_nblocks_)); MESSAGE_("using oneshot nchannels: " + std::to_string(cfg_nchannels_)); MESSAGE_("using oneshot min block: " + std::to_string(cfg_min_block_)); } void IbComm::ARCollContext::update_size(size_t ar_size) { // calculate peerblock size PROXY_ASSERT_MSG((ar_size % (num_gpus_ * 
16)) == 0, "AR size needs to be aligned to num_gpus*16"); ar_size_ = ar_size; blocksize_ = (cfg_nblocks_ - 1 + (cfg_align_block_ - 1 + ar_size) / cfg_align_block_) / cfg_nblocks_; blocksize_ *= cfg_align_block_; if (blocksize_ < cfg_min_block_) { blocksize_ = cfg_min_block_; } peer_blocklines_ = blocksize_ / sizeof(uint4) / num_gpus_; num_blocks_ = (ar_size + blocksize_ - 1) / blocksize_; PROXY_ASSERT(num_blocks_ <= AR_MAX_BLOCKS); } ARCollHandle IbComm::register_ar_coll() { ar_coll_ctx_.emplace_back(std::make_unique<ARCollContext>(this)); ARCollHandle coll_handle = (ARCollHandle)(ar_coll_ctx_.size() - 1); for (size_t g = 0; g < num_gpus_; g++) { M2PARCollInit coll_init_cmd_; coll_init_cmd_.coll_handle_ = coll_handle; coll_init_cmd_.cfg_nblocks_ = ar_coll_ctx_[coll_handle]->cfg_nblocks_; coll_init_cmd_.cfg_align_block_ = ar_coll_ctx_[coll_handle]->cfg_align_block_; coll_init_cmd_.cfg_min_block_ = ar_coll_ctx_[coll_handle]->cfg_min_block_; ARCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull())); proxy_cmd_->cmd_[g] = std::move(cmd); } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); return coll_handle; } template <> sharp_datatype IbComm::get_sharp_dtype<int>() { return SHARP_DTYPE_INT; } template <> sharp_datatype IbComm::get_sharp_dtype<uint32_t>() { return SHARP_DTYPE_UNSIGNED; } template <> sharp_datatype IbComm::get_sharp_dtype<__half>() { return SHARP_DTYPE_FLOAT_SHORT; } template <> sharp_datatype IbComm::get_sharp_dtype<float>() { return SHARP_DTYPE_FLOAT; } template <typename T> void IbComm::set_ar_coll_buf(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id) { PROXY_ASSERT(ar_size != 0); auto& coll_ctx = *ar_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { ERROR_MESSAGE_("Proxy command is already populated. 
Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = ARBufInitCmd(); ARBufInitCmd& cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PARBufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_ar_ptr_ = ar_ptr; buf_init.coll_handle_ = coll; buf_init.d_ar_ptr_ = ar_ptr; buf_init.ar_size_ = ar_size; buf_init.sharp_dtype_ = get_sharp_dtype<T>(); buf_init.element_size_ = sizeof(T); if (coll_ctx.ar_size_ != 0) { PROXY_ASSERT(ar_size == coll_ctx.ar_size_); } coll_ctx.ar_size_ = ar_size; PROXY_ASSERT_MSG(((size_t)ar_ptr & 0xf) == 0, "AR pointer needs to aligned to 16B"); } template void IbComm::set_ar_coll_buf<__half>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<float>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<uint32_t>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); #define MAX_LOCAL_RANKS 32 #define TOTAL_FLAGS (2 * MAX_LOCAL_RANKS + MAX_AR_CHANNELS) void IbComm::register_ar_coll_buf(ARCollHandle coll) { auto& coll_ctx = ar_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); // Allocations for (size_t g = 0; g < num_gpus_; g++) { CK_CUDA_THROW_(hipSetDevice(device_list_[g])); auto& gpu_ctx = *coll_ctx->ctx_[g]; gpu_ctx.buf_ = GeneralBuffer2<CudaAllocator>::create(); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_peer_ptrs_); gpu_ctx.buf_->reserve({1}, &gpu_ctx.d_coll_cmd_); gpu_ctx.buf_->reserve({TOTAL_FLAGS}, &gpu_ctx.d_flags_); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_flags_ptrs_); gpu_ctx.buf_->allocate(); CK_CUDA_THROW_(hipMemset(gpu_ctx.buf_->get_ptr(), 0, gpu_ctx.buf_->get_size_in_bytes())); } // Get proxy output std::vector<void*> h_peer_ptrs(num_gpus_); std::vector<void*> h_peer_flag_ptrs(num_gpus_); for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; h_peer_ptrs[g] = gpu_ctx.d_ar_ptr_; 
h_peer_flag_ptrs[g] = gpu_ctx.d_flags_.get_ptr(); ARBufInitCmd& proxy_cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); gpu_ctx.h_rs_cmd_ = buf_init_out.h_rs_cmd_; gpu_ctx.d_ag_cmd_ = buf_init_out.d_ag_cmd_; } for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; CK_CUDA_THROW_(hipSetDevice(device_list_[g])); CK_CUDA_THROW_(hipMemcpy(gpu_ctx.d_peer_ptrs_.get_ptr(), h_peer_ptrs.data(), num_gpus_ * sizeof(void*), hipMemcpyHostToDevice)); CK_CUDA_THROW_(hipMemcpy(gpu_ctx.d_flags_ptrs_.get_ptr(), h_peer_flag_ptrs.data(), num_gpus_ * sizeof(size_t*), hipMemcpyHostToDevice)); } coll_ctx->update_size(coll_ctx->ar_size_); proxy_cmd_->reset(); } void IbComm::update_size(ARCollHandle coll, const size_t ar_size) { auto& ctx = ar_coll_ctx_[coll]; PROXY_ASSERT_MSG(ar_size < ctx->ar_size_, "updated AR size must be less than init size"); for (size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ARUpdateSizeCmd(); auto& cmd = boost::get<ARUpdateSizeCmd>(proxy_cmd_->cmd_[g]); auto& m2p_cmd = std::get<0>(cmd); m2p_cmd.ar_size_ = ar_size; m2p_cmd.coll_handle_ = coll; } proxy_cmd_->post_command(); ctx->update_size(ar_size); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); } // TODO: rs sync threads is max(SMS + 1, RANKS) #define AR_MAX_THREADS 1024 #define AR_BARRIER_FLAG_OFFSET 0 #define RS_SM_SYNC_OFFSET (RANKS) #define AG_RANK_BCAST_OFFSET (RANKS + MAX_AR_CHANNELS) #define UNROLL 6 #define RS_SYNC_THREADS 32 // MAX of AR_CHANNELS + 1 and RANKS #define AR_WORKER_THREADS (blockDim.x - RS_SYNC_THREADS) template <int RANKS, typename T> static __global__ void __launch_bounds__(AR_MAX_THREADS) all_reduce_cuda(void** __restrict__ d_peer_ptrs, const int numlines, size_t* d_coll_cmd_, size_t* h_rs_cmd_, size_t* d_ag_cmd_, size_t** flags, const int peerblocklines, const int numblocks, const int device_id) { // Do a barrier across all ranks volatile size_t* my_flag = flags[device_id]; size_t base_count = 
*d_coll_cmd_; if (threadIdx.x < RANKS) { if (blockIdx.x == 0) { size_t* rem_flag = flags[threadIdx.x]; rem_flag[AR_BARRIER_FLAG_OFFSET + device_id] = (base_count + 1); } while (my_flag[AR_BARRIER_FLAG_OFFSET + threadIdx.x] < (base_count + 1)) { } } if (threadIdx.x < RS_SYNC_THREADS) { __syncthreads(); // Post barrier and init sync /* sync across SMs and write a single RS complete flag to host */ for (int nblock = 0; nblock < numblocks; nblock++) { asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); size_t flag_count = (nblock + base_count + 1); if (threadIdx.x == 0) { __threadfence(); if (blockIdx.x > 0) { my_flag[RS_SM_SYNC_OFFSET + blockIdx.x] = flag_count; } } else if (blockIdx.x == 0) { if (threadIdx.x < gridDim.x) { while (((volatile size_t*)my_flag)[RS_SM_SYNC_OFFSET + threadIdx.x] < flag_count) { } } } if (blockIdx.x == 0) { asm volatile("bar.sync 2, %0;" ::"r"(RS_SYNC_THREADS)); if (threadIdx.x == 0) { *h_rs_cmd_ = flag_count; } } } /* All gather flag broadcast to all ranks */ size_t cachedflag = base_count; if ((blockIdx.x == 0) && (threadIdx.x < RANKS)) { while (cachedflag < base_count + numblocks) { size_t newflag = *(volatile size_t*)(d_ag_cmd_); if (newflag == cachedflag) continue; cachedflag = newflag; size_t* rem_flag = flags[threadIdx.x]; rem_flag[AG_RANK_BCAST_OFFSET + device_id] = cachedflag; // printf("Wrote flag from %d: %llu %x\n", device_id, cachedflag, d_peer_ptrs[device_id]); } } } else { constexpr int basethread = RS_SYNC_THREADS; const int warp = blockIdx.x + (threadIdx.x >> 5); uint4* remote_ptr[RANKS]; for (int r = 0; r < RANKS; r++) { remote_ptr[r] = reinterpret_cast<uint4*>(d_peer_ptrs[(r + device_id + warp) % RANKS]); } uint4* my_ptr = reinterpret_cast<uint4*>(d_peer_ptrs[device_id]); __syncthreads(); // Post barrier and init sync int blocklineoffset = 0; while (blocklineoffset < numlines) { /* reduce scatter */ const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines 
= remainder / RANKS; // Assumption: numlines is divisible by RANKS const int blockstart = blocklineoffset + blocklines * device_id; const int myThreadIdx = threadIdx.x - basethread; for (int line = blockIdx.x * AR_WORKER_THREADS + myThreadIdx; line < blocklines; line += AR_WORKER_THREADS * gridDim.x) { uint4 val[RANKS]; #pragma unroll for (int i = 0; i < RANKS; i++) { val[i] = remote_ptr[i][blockstart + line]; } uint4 sum = val[0]; T* s = reinterpret_cast<T*>(&sum); #pragma unroll for (int i = 1; i < RANKS; i++) { T* v = reinterpret_cast<T*>(&val[i]); #pragma unroll for (int j = 0; j < sizeof(uint4) / sizeof(T); j++) { s[j] += v[j]; } } my_ptr[blockstart + line] = sum; } asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); blocklineoffset += peerblocklines * RANKS; } // Reduce scatter { /* All gather */ const int nwarps = ((AR_WORKER_THREADS) >> 5) / (RANKS - 1); const int myblockDim = nwarps << 5; const int mywarp = ((threadIdx.x - basethread) >> 5) / (RANKS - 1); const int maxthreadIdx = myblockDim * (RANKS - 1) + basethread; const int mydest = (device_id + 1 + ((threadIdx.x - basethread) >> 5) % (RANKS - 1)) & (RANKS - 1); const int mythreadIdx = (mywarp << 5) + (threadIdx.x & 31); volatile size_t* flag = (volatile size_t*)&(my_flag[AG_RANK_BCAST_OFFSET + mydest]); uint4* dest_ptr = remote_ptr[((RANKS << 10) + mydest - device_id - warp) % RANKS]; blocklineoffset = 0; int gather_count = (base_count + 1); while (blocklineoffset < numlines) { const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines = remainder / RANKS; const int blockstart = blocklineoffset; uint4* myptr = &my_ptr[blockstart + blocklines * mydest]; uint4* peerptr = &dest_ptr[blockstart + blocklines * mydest]; if (threadIdx.x < maxthreadIdx) { const int start_elem = mythreadIdx + myblockDim * blockIdx.x; const int end_elem = max(start_elem, blocklines); const int aligned_elem = ((end_elem - start_elem) / (myblockDim * gridDim.x * 
UNROLL)) * (myblockDim * gridDim.x * UNROLL); const int end_aligned = start_elem + aligned_elem; if (mythreadIdx == 0) { while (*flag < gather_count) { } // printf("Gather flag received %llu %d %d %d %d %d %d %x\n", *flag, device_id, // blockstart, blocklines, numlines, remainder, mydest, dest_ptr); gather_count++; } asm volatile("bar.sync %0, %1;" ::"r"(3 + mydest), "r"(myblockDim)); for (int line = start_elem; line < end_aligned; line += myblockDim * gridDim.x * UNROLL) { uint4 val[UNROLL]; #pragma unroll for (int i = 0; i < UNROLL; i++) { val[i] = peerptr[line + i * myblockDim * gridDim.x]; } #pragma unroll for (int i = 0; i < UNROLL; i++) { myptr[line + i * myblockDim * gridDim.x] = val[i]; } } for (int line = end_aligned; line < end_elem; line += myblockDim * gridDim.x) { myptr[line] = peerptr[line]; } } blocklineoffset += peerblocklines * RANKS; } } // All-gather } if ((threadIdx.x == 0) && (blockIdx.x == 0)) { *d_coll_cmd_ = (base_count + numblocks); } } template <int RANKS, typename T> void IbComm::all_reduce(ARCollHandle coll, hipStream_t stream, size_t device_id) const { auto& ctx = ar_coll_ctx_[coll]; auto& gpu_ctx = ctx->ctx_[device_id]; auto warps = max(RANKS, AR_MAX_THREADS / 32); int numlines = ctx->ar_size_ / sizeof(uint4); int device_id_int = static_cast<int>(device_id); hipLaunchKernelGGL(( all_reduce_cuda<RANKS, T>), dim3(ctx->cfg_nchannels_), dim3(warps * 32), 0, stream, gpu_ctx->d_peer_ptrs_.get_ptr(), numlines, // number of 16B lines gpu_ctx->d_coll_cmd_.get_ptr(), gpu_ctx->h_rs_cmd_, gpu_ctx->d_ag_cmd_, gpu_ctx->d_flags_ptrs_.get_ptr(), ctx->peer_blocklines_, ctx->num_blocks_, device_id_int); } #define SUPPORTED_AR_RANKS (2)(4)(8)(16) template <typename T> void IbComm::all_reduce(ARCollHandle coll, hipStream_t stream, size_t device_id) { #define SWITCHER(r, data, p) \ if (p == num_gpus_) { \ return all_reduce<p, T>(coll, stream, device_id); \ } BOOST_PP_SEQ_FOR_EACH(SWITCHER, "", SUPPORTED_AR_RANKS) #undef SWITCHER PROXY_ASSERT_MSG(false, 
"Unsupported number of local GPU"); } #define AR_METHOD(r, data, p) \ template void IbComm::all_reduce<p, __half>(ARCollHandle, hipStream_t, size_t) const; \ template void IbComm::all_reduce<p, float>(ARCollHandle, hipStream_t, size_t) const; \ template void IbComm::all_reduce<p, uint32_t>(ARCollHandle, hipStream_t, size_t) const; BOOST_PP_SEQ_FOR_EACH(AR_METHOD, "", SUPPORTED_AR_RANKS) #undef AR_METHOD template void IbComm::all_reduce<__half>(ARCollHandle, hipStream_t, size_t); template void IbComm::all_reduce<float>(ARCollHandle, hipStream_t, size_t); template void IbComm::all_reduce<uint32_t>(ARCollHandle, hipStream_t, size_t); } // namespace HugeCTR #endif
24ae3c5b19b84eba33cb9990b261a50259afb869.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef ENABLE_MPI #include <infiniband/verbs.h> #include <boost/preprocessor.hpp> #include <collectives/ib_comm.hpp> #include <iostream> #include <sstream> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { #define MAX_AR_CHANNELS 31 IbComm::ARCollContext::ARCollContext(IbComm* comm) { size_t num_gpus = comm->num_gpus_; num_gpus_ = num_gpus; std::generate_n(std::back_inserter(ctx_), num_gpus, [] { return std::make_unique<ARCollContextPerGPU>(); }); // Read config params from env if (getenv("ONESHOT_NBLOCKS")) { cfg_nblocks_ = atoi(getenv("ONESHOT_NBLOCKS")); } if (getenv("ONESHOT_ALIGN_BLOCK")) { cfg_align_block_ = atoi(getenv("ONESHOT_ALIGN_BLOCK")); } if (getenv("ONESHOT_MIN_BLOCK")) { cfg_min_block_ = atoi(getenv("ONESHOT_MIN_BLOCK")); } if (getenv("ONESHOT_NCHANNELS")) { cfg_nchannels_ = atoi(getenv("ONESHOT_NCHANNELS")); } PROXY_ASSERT_MSG(cfg_nchannels_ <= MAX_AR_CHANNELS, "Max oneshot channels is 31"); PROXY_ASSERT(cfg_nblocks_ <= AR_MAX_BLOCKS); MESSAGE_("using oneshot nblocks: " + std::to_string(cfg_nblocks_)); MESSAGE_("using oneshot nchannels: " + std::to_string(cfg_nchannels_)); MESSAGE_("using oneshot min block: " + std::to_string(cfg_min_block_)); } void IbComm::ARCollContext::update_size(size_t ar_size) { // calculate peerblock size PROXY_ASSERT_MSG((ar_size % (num_gpus_ * 16)) == 0, "AR size needs to be aligned to num_gpus*16"); ar_size_ = ar_size; blocksize_ 
= (cfg_nblocks_ - 1 + (cfg_align_block_ - 1 + ar_size) / cfg_align_block_) / cfg_nblocks_; blocksize_ *= cfg_align_block_; if (blocksize_ < cfg_min_block_) { blocksize_ = cfg_min_block_; } peer_blocklines_ = blocksize_ / sizeof(uint4) / num_gpus_; num_blocks_ = (ar_size + blocksize_ - 1) / blocksize_; PROXY_ASSERT(num_blocks_ <= AR_MAX_BLOCKS); } ARCollHandle IbComm::register_ar_coll() { ar_coll_ctx_.emplace_back(std::make_unique<ARCollContext>(this)); ARCollHandle coll_handle = (ARCollHandle)(ar_coll_ctx_.size() - 1); for (size_t g = 0; g < num_gpus_; g++) { M2PARCollInit coll_init_cmd_; coll_init_cmd_.coll_handle_ = coll_handle; coll_init_cmd_.cfg_nblocks_ = ar_coll_ctx_[coll_handle]->cfg_nblocks_; coll_init_cmd_.cfg_align_block_ = ar_coll_ctx_[coll_handle]->cfg_align_block_; coll_init_cmd_.cfg_min_block_ = ar_coll_ctx_[coll_handle]->cfg_min_block_; ARCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull())); proxy_cmd_->cmd_[g] = std::move(cmd); } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); return coll_handle; } template <> sharp_datatype IbComm::get_sharp_dtype<int>() { return SHARP_DTYPE_INT; } template <> sharp_datatype IbComm::get_sharp_dtype<uint32_t>() { return SHARP_DTYPE_UNSIGNED; } template <> sharp_datatype IbComm::get_sharp_dtype<__half>() { return SHARP_DTYPE_FLOAT_SHORT; } template <> sharp_datatype IbComm::get_sharp_dtype<float>() { return SHARP_DTYPE_FLOAT; } template <typename T> void IbComm::set_ar_coll_buf(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id) { PROXY_ASSERT(ar_size != 0); auto& coll_ctx = *ar_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { ERROR_MESSAGE_("Proxy command is already populated. 
Don't mix up set API"); exit(1); } proxy_cmd_->cmd_[device_id] = ARBufInitCmd(); ARBufInitCmd& cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PARBufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_ar_ptr_ = ar_ptr; buf_init.coll_handle_ = coll; buf_init.d_ar_ptr_ = ar_ptr; buf_init.ar_size_ = ar_size; buf_init.sharp_dtype_ = get_sharp_dtype<T>(); buf_init.element_size_ = sizeof(T); if (coll_ctx.ar_size_ != 0) { PROXY_ASSERT(ar_size == coll_ctx.ar_size_); } coll_ctx.ar_size_ = ar_size; PROXY_ASSERT_MSG(((size_t)ar_ptr & 0xf) == 0, "AR pointer needs to aligned to 16B"); } template void IbComm::set_ar_coll_buf<__half>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<float>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<uint32_t>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); #define MAX_LOCAL_RANKS 32 #define TOTAL_FLAGS (2 * MAX_LOCAL_RANKS + MAX_AR_CHANNELS) void IbComm::register_ar_coll_buf(ARCollHandle coll) { auto& coll_ctx = ar_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); // Allocations for (size_t g = 0; g < num_gpus_; g++) { CK_CUDA_THROW_(cudaSetDevice(device_list_[g])); auto& gpu_ctx = *coll_ctx->ctx_[g]; gpu_ctx.buf_ = GeneralBuffer2<CudaAllocator>::create(); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_peer_ptrs_); gpu_ctx.buf_->reserve({1}, &gpu_ctx.d_coll_cmd_); gpu_ctx.buf_->reserve({TOTAL_FLAGS}, &gpu_ctx.d_flags_); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_flags_ptrs_); gpu_ctx.buf_->allocate(); CK_CUDA_THROW_(cudaMemset(gpu_ctx.buf_->get_ptr(), 0, gpu_ctx.buf_->get_size_in_bytes())); } // Get proxy output std::vector<void*> h_peer_ptrs(num_gpus_); std::vector<void*> h_peer_flag_ptrs(num_gpus_); for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; h_peer_ptrs[g] = gpu_ctx.d_ar_ptr_; 
h_peer_flag_ptrs[g] = gpu_ctx.d_flags_.get_ptr(); ARBufInitCmd& proxy_cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); gpu_ctx.h_rs_cmd_ = buf_init_out.h_rs_cmd_; gpu_ctx.d_ag_cmd_ = buf_init_out.d_ag_cmd_; } for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; CK_CUDA_THROW_(cudaSetDevice(device_list_[g])); CK_CUDA_THROW_(cudaMemcpy(gpu_ctx.d_peer_ptrs_.get_ptr(), h_peer_ptrs.data(), num_gpus_ * sizeof(void*), cudaMemcpyHostToDevice)); CK_CUDA_THROW_(cudaMemcpy(gpu_ctx.d_flags_ptrs_.get_ptr(), h_peer_flag_ptrs.data(), num_gpus_ * sizeof(size_t*), cudaMemcpyHostToDevice)); } coll_ctx->update_size(coll_ctx->ar_size_); proxy_cmd_->reset(); } void IbComm::update_size(ARCollHandle coll, const size_t ar_size) { auto& ctx = ar_coll_ctx_[coll]; PROXY_ASSERT_MSG(ar_size < ctx->ar_size_, "updated AR size must be less than init size"); for (size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ARUpdateSizeCmd(); auto& cmd = boost::get<ARUpdateSizeCmd>(proxy_cmd_->cmd_[g]); auto& m2p_cmd = std::get<0>(cmd); m2p_cmd.ar_size_ = ar_size; m2p_cmd.coll_handle_ = coll; } proxy_cmd_->post_command(); ctx->update_size(ar_size); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); } // TODO: rs sync threads is max(SMS + 1, RANKS) #define AR_MAX_THREADS 1024 #define AR_BARRIER_FLAG_OFFSET 0 #define RS_SM_SYNC_OFFSET (RANKS) #define AG_RANK_BCAST_OFFSET (RANKS + MAX_AR_CHANNELS) #define UNROLL 6 #define RS_SYNC_THREADS 32 // MAX of AR_CHANNELS + 1 and RANKS #define AR_WORKER_THREADS (blockDim.x - RS_SYNC_THREADS) template <int RANKS, typename T> static __global__ void __launch_bounds__(AR_MAX_THREADS) all_reduce_cuda(void** __restrict__ d_peer_ptrs, const int numlines, size_t* d_coll_cmd_, size_t* h_rs_cmd_, size_t* d_ag_cmd_, size_t** flags, const int peerblocklines, const int numblocks, const int device_id) { // Do a barrier across all ranks volatile size_t* my_flag = flags[device_id]; size_t base_count = 
*d_coll_cmd_; if (threadIdx.x < RANKS) { if (blockIdx.x == 0) { size_t* rem_flag = flags[threadIdx.x]; rem_flag[AR_BARRIER_FLAG_OFFSET + device_id] = (base_count + 1); } while (my_flag[AR_BARRIER_FLAG_OFFSET + threadIdx.x] < (base_count + 1)) { } } if (threadIdx.x < RS_SYNC_THREADS) { __syncthreads(); // Post barrier and init sync /* sync across SMs and write a single RS complete flag to host */ for (int nblock = 0; nblock < numblocks; nblock++) { asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); size_t flag_count = (nblock + base_count + 1); if (threadIdx.x == 0) { __threadfence(); if (blockIdx.x > 0) { my_flag[RS_SM_SYNC_OFFSET + blockIdx.x] = flag_count; } } else if (blockIdx.x == 0) { if (threadIdx.x < gridDim.x) { while (((volatile size_t*)my_flag)[RS_SM_SYNC_OFFSET + threadIdx.x] < flag_count) { } } } if (blockIdx.x == 0) { asm volatile("bar.sync 2, %0;" ::"r"(RS_SYNC_THREADS)); if (threadIdx.x == 0) { *h_rs_cmd_ = flag_count; } } } /* All gather flag broadcast to all ranks */ size_t cachedflag = base_count; if ((blockIdx.x == 0) && (threadIdx.x < RANKS)) { while (cachedflag < base_count + numblocks) { size_t newflag = *(volatile size_t*)(d_ag_cmd_); if (newflag == cachedflag) continue; cachedflag = newflag; size_t* rem_flag = flags[threadIdx.x]; rem_flag[AG_RANK_BCAST_OFFSET + device_id] = cachedflag; // printf("Wrote flag from %d: %llu %x\n", device_id, cachedflag, d_peer_ptrs[device_id]); } } } else { constexpr int basethread = RS_SYNC_THREADS; const int warp = blockIdx.x + (threadIdx.x >> 5); uint4* remote_ptr[RANKS]; for (int r = 0; r < RANKS; r++) { remote_ptr[r] = reinterpret_cast<uint4*>(d_peer_ptrs[(r + device_id + warp) % RANKS]); } uint4* my_ptr = reinterpret_cast<uint4*>(d_peer_ptrs[device_id]); __syncthreads(); // Post barrier and init sync int blocklineoffset = 0; while (blocklineoffset < numlines) { /* reduce scatter */ const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines 
= remainder / RANKS; // Assumption: numlines is divisible by RANKS const int blockstart = blocklineoffset + blocklines * device_id; const int myThreadIdx = threadIdx.x - basethread; for (int line = blockIdx.x * AR_WORKER_THREADS + myThreadIdx; line < blocklines; line += AR_WORKER_THREADS * gridDim.x) { uint4 val[RANKS]; #pragma unroll for (int i = 0; i < RANKS; i++) { val[i] = remote_ptr[i][blockstart + line]; } uint4 sum = val[0]; T* s = reinterpret_cast<T*>(&sum); #pragma unroll for (int i = 1; i < RANKS; i++) { T* v = reinterpret_cast<T*>(&val[i]); #pragma unroll for (int j = 0; j < sizeof(uint4) / sizeof(T); j++) { s[j] += v[j]; } } my_ptr[blockstart + line] = sum; } asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); blocklineoffset += peerblocklines * RANKS; } // Reduce scatter { /* All gather */ const int nwarps = ((AR_WORKER_THREADS) >> 5) / (RANKS - 1); const int myblockDim = nwarps << 5; const int mywarp = ((threadIdx.x - basethread) >> 5) / (RANKS - 1); const int maxthreadIdx = myblockDim * (RANKS - 1) + basethread; const int mydest = (device_id + 1 + ((threadIdx.x - basethread) >> 5) % (RANKS - 1)) & (RANKS - 1); const int mythreadIdx = (mywarp << 5) + (threadIdx.x & 31); volatile size_t* flag = (volatile size_t*)&(my_flag[AG_RANK_BCAST_OFFSET + mydest]); uint4* dest_ptr = remote_ptr[((RANKS << 10) + mydest - device_id - warp) % RANKS]; blocklineoffset = 0; int gather_count = (base_count + 1); while (blocklineoffset < numlines) { const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines = remainder / RANKS; const int blockstart = blocklineoffset; uint4* myptr = &my_ptr[blockstart + blocklines * mydest]; uint4* peerptr = &dest_ptr[blockstart + blocklines * mydest]; if (threadIdx.x < maxthreadIdx) { const int start_elem = mythreadIdx + myblockDim * blockIdx.x; const int end_elem = max(start_elem, blocklines); const int aligned_elem = ((end_elem - start_elem) / (myblockDim * gridDim.x * 
UNROLL)) * (myblockDim * gridDim.x * UNROLL); const int end_aligned = start_elem + aligned_elem; if (mythreadIdx == 0) { while (*flag < gather_count) { } // printf("Gather flag received %llu %d %d %d %d %d %d %x\n", *flag, device_id, // blockstart, blocklines, numlines, remainder, mydest, dest_ptr); gather_count++; } asm volatile("bar.sync %0, %1;" ::"r"(3 + mydest), "r"(myblockDim)); for (int line = start_elem; line < end_aligned; line += myblockDim * gridDim.x * UNROLL) { uint4 val[UNROLL]; #pragma unroll for (int i = 0; i < UNROLL; i++) { val[i] = peerptr[line + i * myblockDim * gridDim.x]; } #pragma unroll for (int i = 0; i < UNROLL; i++) { myptr[line + i * myblockDim * gridDim.x] = val[i]; } } for (int line = end_aligned; line < end_elem; line += myblockDim * gridDim.x) { myptr[line] = peerptr[line]; } } blocklineoffset += peerblocklines * RANKS; } } // All-gather } if ((threadIdx.x == 0) && (blockIdx.x == 0)) { *d_coll_cmd_ = (base_count + numblocks); } } template <int RANKS, typename T> void IbComm::all_reduce(ARCollHandle coll, cudaStream_t stream, size_t device_id) const { auto& ctx = ar_coll_ctx_[coll]; auto& gpu_ctx = ctx->ctx_[device_id]; auto warps = max(RANKS, AR_MAX_THREADS / 32); int numlines = ctx->ar_size_ / sizeof(uint4); int device_id_int = static_cast<int>(device_id); all_reduce_cuda<RANKS, T><<<ctx->cfg_nchannels_, warps * 32, 0, stream>>>( gpu_ctx->d_peer_ptrs_.get_ptr(), numlines, // number of 16B lines gpu_ctx->d_coll_cmd_.get_ptr(), gpu_ctx->h_rs_cmd_, gpu_ctx->d_ag_cmd_, gpu_ctx->d_flags_ptrs_.get_ptr(), ctx->peer_blocklines_, ctx->num_blocks_, device_id_int); } #define SUPPORTED_AR_RANKS (2)(4)(8)(16) template <typename T> void IbComm::all_reduce(ARCollHandle coll, cudaStream_t stream, size_t device_id) { #define SWITCHER(r, data, p) \ if (p == num_gpus_) { \ return all_reduce<p, T>(coll, stream, device_id); \ } BOOST_PP_SEQ_FOR_EACH(SWITCHER, "", SUPPORTED_AR_RANKS) #undef SWITCHER PROXY_ASSERT_MSG(false, "Unsupported number of local 
GPU"); } #define AR_METHOD(r, data, p) \ template void IbComm::all_reduce<p, __half>(ARCollHandle, cudaStream_t, size_t) const; \ template void IbComm::all_reduce<p, float>(ARCollHandle, cudaStream_t, size_t) const; \ template void IbComm::all_reduce<p, uint32_t>(ARCollHandle, cudaStream_t, size_t) const; BOOST_PP_SEQ_FOR_EACH(AR_METHOD, "", SUPPORTED_AR_RANKS) #undef AR_METHOD template void IbComm::all_reduce<__half>(ARCollHandle, cudaStream_t, size_t); template void IbComm::all_reduce<float>(ARCollHandle, cudaStream_t, size_t); template void IbComm::all_reduce<uint32_t>(ARCollHandle, cudaStream_t, size_t); } // namespace HugeCTR #endif
91efd0d11d6a14b44c9d2d46abd510dbc5c9d8e3.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <float.h> #include <hip/hip_runtime.h> __global__ void gpu_Heat (float *h, float *g, int N,float *residual) { // TODO: kernel computation //... extern __shared__ float res_vector[]; int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; int index = row*N+col; int res_index = threadIdx.x*blockDim.x+threadIdx.y; res_vector[res_index] = 0.0; if( row > 0 && row < (N-1) && col > 0 && col < (N-1) ){ g[index] = 0.25f *( h[row*N+(col-1)] + h[row*N+(col+1)] + h[(row-1)*N+col] + h[(row+1)*N+col] ); float diff = g[index]-h[index]; res_vector[res_index] = diff*diff; __syncthreads(); } for(unsigned int s = blockDim.x*blockDim.x/2; s> 0 ;s>>=1){ if( res_index < s) res_vector[res_index] += res_vector[res_index+s]; __syncthreads(); } if( res_index == 0){ residual[blockIdx.x*gridDim.x+blockIdx.y] = res_vector[0]; } }
91efd0d11d6a14b44c9d2d46abd510dbc5c9d8e3.cu
#include <math.h> #include <float.h> #include <cuda.h> __global__ void gpu_Heat (float *h, float *g, int N,float *residual) { // TODO: kernel computation //... extern __shared__ float res_vector[]; int row = blockIdx.x*blockDim.x + threadIdx.x; int col = blockIdx.y*blockDim.y + threadIdx.y; int index = row*N+col; int res_index = threadIdx.x*blockDim.x+threadIdx.y; res_vector[res_index] = 0.0; if( row > 0 && row < (N-1) && col > 0 && col < (N-1) ){ g[index] = 0.25f *( h[row*N+(col-1)] + h[row*N+(col+1)] + h[(row-1)*N+col] + h[(row+1)*N+col] ); float diff = g[index]-h[index]; res_vector[res_index] = diff*diff; __syncthreads(); } for(unsigned int s = blockDim.x*blockDim.x/2; s> 0 ;s>>=1){ if( res_index < s) res_vector[res_index] += res_vector[res_index+s]; __syncthreads(); } if( res_index == 0){ residual[blockIdx.x*gridDim.x+blockIdx.y] = res_vector[0]; } }
596220c2a4a64d54cd499e18ccf8258b1fdbc8bc.hip
// !!! This is a file automatically generated by hipify!!! /*! \file arrays.cu \author Andrew Kerr <arkerr@gatech.edu> \brief tests implementation of hipMallocArray(), among other things \date Feb 12, 2010 */ #include <stdlib.h> #include <stdio.h> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// bool testMemcpy(bool verbose) { bool passed = true; int width = 1024, height = 512; int errors = 0; hipChannelFormatDesc channel = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray *cuArray; hipMallocArray(&cuArray, &channel, width, height); srand(7); size_t bytes = sizeof(float) * width * height; float *hostSource = new float[width * height]; float *hostDest = new float[width * height]; for (int j = 0; j < height; j++) { float *ptr = hostSource + j * width; float *dstPtr = hostDest + j * width; for (int i = 0; i < width; i++) { float x = (float)( (rand() % 1024) / 125.0f); ptr[i] = x; dstPtr[i] = -1.0f; } } hipMemcpyToArray(cuArray, 0, 0, hostSource, bytes, hipMemcpyHostToDevice); cudaMemcpyFromArray(hostDest, cuArray, 0, 0, bytes, hipMemcpyDeviceToHost); for (int j = 0; j < height && errors < 5; j++) { float *srcPtr = hostSource + j * width; float *dstPtr = hostDest + j * width; for (int i = 0; i < width && errors < 5; i++) { float expected = srcPtr[i]; float got = dstPtr[i]; if (fabs(expected - got) > 0.001f) { ++errors; if (verbose) { printf("ERROR: (%d, %d) - expected %f, got %f\n", i, j, expected, got); fflush(stdout); } } } } hipFreeArray(cuArray); delete [] hostSource; delete [] hostDest; return passed; } ////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char *arg[]) { bool result = testMemcpy(true); if (result) { printf("Test PASSED\n"); } else { printf("Test FAILED\n"); } return 0; } 
//////////////////////////////////////////////////////////////////////////////////////////////////
596220c2a4a64d54cd499e18ccf8258b1fdbc8bc.cu
/*! \file arrays.cu
    \author Andrew Kerr <arkerr@gatech.edu>
    \brief tests implementation of cudaMallocArray(), among other things
    \date Feb 12, 2010
*/

#include <stdlib.h>
#include <stdio.h>
#include <math.h>   /* fabs() */

//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////

/**
 * Round-trips a width x height float image through a cudaArray and verifies
 * the copy-back matches the source element-for-element.
 *
 * \param verbose  when true, prints up to five mismatching elements to stdout.
 * \return true iff no element mismatched.
 *
 * NOTE(review): cudaMemcpyToArray/cudaMemcpyFromArray are deprecated since
 * CUDA 10.1 in favor of cudaMemcpy2DToArray/cudaMemcpy2DFromArray — confirm
 * against the toolkit version this test targets.
 */
bool testMemcpy(bool verbose) {
	int width = 1024, height = 512;
	int errors = 0;

	cudaChannelFormatDesc channel = cudaCreateChannelDesc(32, 0, 0, 0,
		cudaChannelFormatKindFloat);

	cudaArray *cuArray;
	cudaMallocArray(&cuArray, &channel, width, height);

	// Fixed seed so the reference data is reproducible run-to-run.
	srand(7);
	size_t bytes = sizeof(float) * width * height;
	float *hostSource = new float[width * height];
	float *hostDest = new float[width * height];
	for (int j = 0; j < height; j++) {
		float *ptr = hostSource + j * width;
		float *dstPtr = hostDest + j * width;
		for (int i = 0; i < width; i++) {
			float x = (float)( (rand() % 1024) / 125.0f);
			ptr[i] = x;
			dstPtr[i] = -1.0f;   // sentinel: must be overwritten by the copy-back
		}
	}

	cudaMemcpyToArray(cuArray, 0, 0, hostSource, bytes, cudaMemcpyHostToDevice);
	cudaMemcpyFromArray(hostDest, cuArray, 0, 0, bytes, cudaMemcpyDeviceToHost);

	// Compare, reporting at most five mismatches.
	for (int j = 0; j < height && errors < 5; j++) {
		float *srcPtr = hostSource + j * width;
		float *dstPtr = hostDest + j * width;
		for (int i = 0; i < width && errors < 5; i++) {
			float expected = srcPtr[i];
			float got = dstPtr[i];
			if (fabs(expected - got) > 0.001f) {
				++errors;
				if (verbose) {
					printf("ERROR: (%d, %d) - expected %f, got %f\n", i, j, expected, got);
					fflush(stdout);
				}
			}
		}
	}

	cudaFreeArray(cuArray);
	delete [] hostSource;
	delete [] hostDest;

	// BUG FIX: 'passed' was initialized to true and never updated, so the
	// test reported PASSED even when mismatches were detected.
	return errors == 0;
}

//////////////////////////////////////////////////////////////////////////////////////////////////

int main(int argc, char *arg[]) {
	bool result = testMemcpy(true);
	if (result) {
		printf("Test PASSED\n");
	}
	else {
		printf("Test FAILED\n");
	}
	return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
4a817b49218da9de2280ac6a4ac92f8276ac18a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AdamUpdate(int N, int t, Dtype* g, Dtype* m, Dtype* v, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, bool amsgrad, bool rectified, bool gc, Dtype mean, bool adabelief) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; if (gc) gi -= mean; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi_old = v[i]; if (adabelief) gi -= mi; float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); if (amsgrad) { if (vi < vi_old) v[i] = vi = vi_old; } if (!rectified) g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); else { Dtype rho_inf = 2.0/(1.0-beta2) - 1.0; Dtype rho_t = rho_inf - 2.0 * t * pow(beta2,t)/(1.0-pow(beta2,t)) ; if (rho_t > 4.0) { Dtype r_t = sqrt( (rho_t-4.0) * (rho_t-2.0) * rho_inf / (rho_inf - 4.0) / (rho_inf - 2.0) / rho_t); g[i] = corrected_local_rate * mi * r_t / (sqrt(vi) + eps_hat); } else { g[i] = corrected_local_rate * mi; } } } } template <typename Dtype> __global__ void AdamUpdateDecoupledWD(int N, int t, Dtype* g, Dtype* m, Dtype* v, const Dtype* param, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, Dtype lambda, Dtype nu, bool amsgrad, bool rectified, bool gc, Dtype mean, bool adabelief) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; if (gc) gi -= mean; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi_old = v[i]; if (adabelief) gi -= mi; float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); if (amsgrad) { if (vi < vi_old) v[i] = vi = vi_old; } if (!rectified) g[i] = nu * (corrected_local_rate * mi / (sqrt(vi) + eps_hat) + param[i] * lambda); else { Dtype rho_inf = 2.0/(1.0-beta2) - 1.0; Dtype rho_t = rho_inf - 2.0 * t * pow(beta2,t)/(1.0-pow(beta2,t)) ; if (rho_t > 4.0) { Dtype r_t = sqrt( (rho_t-4.0) * (rho_t-2.0) * rho_inf / (rho_inf - 4.0) / (rho_inf - 2.0) / rho_t); g[i] = nu * (corrected_local_rate * mi * r_t / (sqrt(vi) + 
eps_hat) + param[i] * lambda); } else { g[i] = nu * (corrected_local_rate * mi + param[i] * lambda); } } } } template <typename Dtype> void adam_update_gpu(int N, int t, Dtype* g, Dtype* m, Dtype* v, const Dtype* param, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, Dtype lambda, Dtype nu, bool amsgrad, bool decoupled_wd, bool rectified, bool gc, Dtype mean, bool adabelief) { if (!decoupled_wd) { AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, t, g, m, v, beta1, beta2, eps_hat, corrected_local_rate, amsgrad, rectified, gc, mean, adabelief); CUDA_POST_KERNEL_CHECK; } else{ AdamUpdateDecoupledWD<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, t, g, m, v, param, beta1, beta2, eps_hat, corrected_local_rate, lambda, nu, amsgrad, rectified, gc, mean, adabelief); CUDA_POST_KERNEL_CHECK; } } template void adam_update_gpu<float>(int, int, float*, float*, float*, const float*, float, float, float, float, float, float, bool, bool, bool, bool, float,bool); template void adam_update_gpu<double>(int, int, double*, double*, double*, const double*, double, double, double, double, double, double, bool, bool, bool,bool,double,bool); } // namespace caffe
4a817b49218da9de2280ac6a4ac92f8276ac18a1.cu
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Adam-family parameter update (no decoupled weight decay).
// Each element updates the first moment m and second moment v in place and
// then overwrites g with the scaled step. Optional behaviors: gradient
// centralization (gc), AdaBelief variance, AMSGrad max-variance, and RAdam
// rectification.
template <typename Dtype>
__global__ void AdamUpdate(int N, int t, Dtype* g, Dtype* m, Dtype* v,
    Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate,
    bool amsgrad, bool rectified, bool gc, Dtype mean, bool adabelief) {
  CUDA_KERNEL_LOOP(i, N) {
    float grad = g[i];
    if (gc)
      grad -= mean;                          // gradient centralization
    float moment1 = m[i] = m[i]*beta1 + grad*(1-beta1);
    float var_prev = v[i];
    if (adabelief)
      grad -= moment1;                       // variance of (g - m)
    float moment2 = v[i] = v[i]*beta2 + grad*grad*(1-beta2);
    if (amsgrad && moment2 < var_prev)
      v[i] = moment2 = var_prev;             // running max of v
    if (!rectified) {
      g[i] = corrected_local_rate * moment1 / (sqrt(moment2) + eps_hat);
    } else {
      // RAdam: use the adaptive step only once rho_t exceeds 4.
      Dtype rho_inf = 2.0/(1.0-beta2) - 1.0;
      Dtype rho_t = rho_inf - 2.0 * t * pow(beta2,t)/(1.0-pow(beta2,t)) ;
      if (rho_t > 4.0) {
        Dtype r_t = sqrt( (rho_t-4.0) * (rho_t-2.0) * rho_inf /
            (rho_inf - 4.0) / (rho_inf - 2.0) / rho_t);
        g[i] = corrected_local_rate * moment1 * r_t / (sqrt(moment2) + eps_hat);
      } else {
        g[i] = corrected_local_rate * moment1;
      }
    }
  }
}

// Same update as AdamUpdate, plus AdamW-style decoupled weight decay:
// the final step is nu * (adam_step + lambda * param[i]).
template <typename Dtype>
__global__ void AdamUpdateDecoupledWD(int N, int t, Dtype* g, Dtype* m,
    Dtype* v, const Dtype* param, Dtype beta1, Dtype beta2, Dtype eps_hat,
    Dtype corrected_local_rate, Dtype lambda, Dtype nu, bool amsgrad,
    bool rectified, bool gc, Dtype mean, bool adabelief) {
  CUDA_KERNEL_LOOP(i, N) {
    float grad = g[i];
    if (gc)
      grad -= mean;
    float moment1 = m[i] = m[i]*beta1 + grad*(1-beta1);
    float var_prev = v[i];
    if (adabelief)
      grad -= moment1;
    float moment2 = v[i] = v[i]*beta2 + grad*grad*(1-beta2);
    if (amsgrad && moment2 < var_prev)
      v[i] = moment2 = var_prev;
    if (!rectified) {
      g[i] = nu * (corrected_local_rate * moment1 / (sqrt(moment2) + eps_hat)
          + param[i] * lambda);
    } else {
      Dtype rho_inf = 2.0/(1.0-beta2) - 1.0;
      Dtype rho_t = rho_inf - 2.0 * t * pow(beta2,t)/(1.0-pow(beta2,t)) ;
      if (rho_t > 4.0) {
        Dtype r_t = sqrt( (rho_t-4.0) * (rho_t-2.0) * rho_inf /
            (rho_inf - 4.0) / (rho_inf - 2.0) / rho_t);
        g[i] = nu * (corrected_local_rate * moment1 * r_t /
            (sqrt(moment2) + eps_hat) + param[i] * lambda);
      } else {
        g[i] = nu * (corrected_local_rate * moment1 + param[i] * lambda);
      }
    }
  }
}

// Host-side dispatcher: launches the plain or the decoupled-weight-decay
// kernel over N elements with Caffe's standard launch configuration.
template <typename Dtype>
void adam_update_gpu(int N, int t, Dtype* g, Dtype* m, Dtype* v,
    const Dtype* param, Dtype beta1, Dtype beta2, Dtype eps_hat,
    Dtype corrected_local_rate, Dtype lambda, Dtype nu, bool amsgrad,
    bool decoupled_wd, bool rectified, bool gc, Dtype mean, bool adabelief) {
  if (!decoupled_wd) {
    AdamUpdate<Dtype>
        // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
        N, t, g, m, v, beta1, beta2, eps_hat, corrected_local_rate,
        amsgrad, rectified, gc, mean, adabelief);
    CUDA_POST_KERNEL_CHECK;
  } else {
    AdamUpdateDecoupledWD<Dtype>
        // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
        N, t, g, m, v, param, beta1, beta2, eps_hat, corrected_local_rate,
        lambda, nu, amsgrad, rectified, gc, mean, adabelief);
    CUDA_POST_KERNEL_CHECK;
  }
}

template void adam_update_gpu<float>(int, int, float*, float*, float*,
    const float*, float, float, float, float, float, float, bool, bool,
    bool, bool, float,bool);
template void adam_update_gpu<double>(int, int, double*, double*, double*,
    const double*, double, double, double, double, double, double, bool,
    bool, bool,bool,double,bool);

}  // namespace caffe
9618b9d3af0fbde2390604c5e40747382a21a76a.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar (thakkarv@gatech.edu). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, 
EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, 
N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; 
state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // 
Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus (tropical semiring) SRGEMM benchmark for the tile shape above;
// square problem size N comes from the BENCHMARK range registered below.
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x16x1_2x2_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x64x8_8x32x1_2x4_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x16x1_4x2_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x32x1_4x4_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x8_16x64x1_4x8_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x16x1_4x4_8x4_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x32x1_8x4_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_32x64x1_8x8_4x8_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_64x32x1_8x8_8x4_1x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x32x1_4x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_32x32x1_8x4_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x64x1_8x8_4x8_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x32x1_8x8_8x4_2x1)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//
Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus (tropical semiring) SRGEMM benchmark for the tile shape above;
// square problem size N comes from the BENCHMARK range registered below.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_8x16x1_2x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_8x32x1_2x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x16x1_4x2_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x32x1_4x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x64x1_4x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_32x16x1_4x4_8x4_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x32x1_8x4_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    // wait for the asynchronous kernel so the iteration times completed work
    hipDeviceSynchronize();
  }

  // nominal 2*N^3 operation count, exported as an iteration-invariant rate
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x64x1_8x8_4x8_2x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Max-plus SRGEMM benchmark for the tile shape above.
static void
BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                  //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // 
benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; 
state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
// Benchmarks max-plus (tropical) semiring SGEMM on N x N x N problems.
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x256x8_16x64x1_4x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x32x1_8x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_32x64x1_8x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_64x32x1_8x8_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<8, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<16, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x32x1_8x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<32, 64, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x64x1_8x8_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;
  using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
  // setup bench harness
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
  // benchmark loop
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    hipDeviceSynchronize();
  }
  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x32x8_64x16x1_8x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // 
Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision 
= float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using 
WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, 
SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // 
benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
9618b9d3af0fbde2390604c5e40747382a21a76a.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar (thakkarv@gatech.edu). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark 
loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = 
benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_32x32x1_8x4_4x8_1x1) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // 
//   Threads / Warp:  8 x 4
//    Warps / Block:  1 x 1
//      Threadblock: 64 x 32 x 8

////////////////////////////////////////////////////////////////////////////////
// CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(TB_M, TB_N, W_M, W_N, SUFFIX)
//
// Generates one Google-Benchmark case for the (max, +) semiring single
// precision SRGEMM with column-major A, column-major B and row-major C/D
// ("nn_t") on SM50 SIMT cores. The instantiations below differ only in the
// threadblock/warp tile shapes and in the benchmark-name suffix, so the
// common body is factored into this macro instead of being repeated verbatim.
//
//   TB_M, TB_N : threadblock tile M x N (tile K is always 8 for these kernels)
//   W_M,  W_N  : warp tile M x N (warp K is always 8)
//   SUFFIX     : tile-configuration suffix pasted onto the benchmark name,
//                e.g. 64x32x8_64x32x1_8x8_8x4_1x1
//
// Each case squares the benchmark range value N into an N x N x N problem,
// runs the device harness once per iteration, synchronizes so the timing
// covers kernel completion, and reports 2*N^3 flop per iteration as a rate.
//
// NOTE: no '//' comments inside the macro body — a line comment would
// swallow the trailing line-continuation backslash.
#define CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(TB_M, TB_N, W_M, W_N, SUFFIX)         \
  static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_##SUFFIX(               \
      benchmark::State &state) {                                               \
    const auto N = static_cast<int>(state.range(0));                           \
    using precision = float;                                                   \
    using OpClass   = cutlass::arch::OpClassSimt;                              \
    using SmArch    = cutlass::arch::Sm50;                                     \
    using ThreadblockShape = cutlass::gemm::GemmShape<TB_M, TB_N, 8>;          \
    using WarpShape        = cutlass::gemm::GemmShape<W_M, W_N, 8>;            \
    using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;                \
    using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< \
        precision, precision, precision, precision, OpClass,                   \
        cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;            \
    using AddOp            = Config::AdditionOp;                               \
    using MultOp           = Config::MultiplicationOp;                         \
    using EpilogueOutputOp = Config::EpilogueOutputOp;                         \
    using Srgemm = cuasr::gemm::device::Srgemm<                                \
        AddOp, MultOp,                                                         \
        precision, cutlass::layout::ColumnMajor,                               \
        precision, cutlass::layout::ColumnMajor,                               \
        precision, cutlass::layout::RowMajor,                                  \
        precision, OpClass, SmArch,                                            \
        ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,       \
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;      \
    cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });             \
    for (auto _ : state) {                                                     \
      benchmark::DoNotOptimize(bench.run());                                   \
      cudaDeviceSynchronize();                                                 \
    }                                                                          \
    double flops_per_itr = 2.0 * N * N * N;                                    \
    state.counters["Flop/s"] = benchmark::Counter(                             \
        flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);         \
  }                                                                            \
  BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_##SUFFIX)                 \
      ->RangeMultiplier(2)->Range(256, 4096);

#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 32, 64, 32, 64x32x8_64x32x1_8x8_8x4_1x1)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock:  8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(8, 32, 8, 16, 8x32x8_8x16x1_2x2_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock:  8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(8, 64, 8, 32, 8x64x8_8x32x1_2x4_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(16, 32, 16, 16, 16x32x8_16x16x1_4x2_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(16, 64, 16, 32, 16x64x8_16x32x1_4x4_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(16, 128, 16, 64, 16x128x8_16x64x1_4x8_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp:  8 x 4
//    Warps / Block:  1 x 2
//      Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 32, 32, 16, 32x32x8_32x16x1_4x4_8x4_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 64, 32, 32, 32x64x8_32x32x1_8x4_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp:  4 x 8
//    Warps / Block:  1 x 2
//      Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 128, 32, 64, 32x128x8_32x64x1_8x8_4x8_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp:  8 x 4
//    Warps / Block:  1 x 2
//      Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 64, 64, 32, 64x64x8_64x32x1_8x8_8x4_1x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 1
//      Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 32, 16, 32, 32x32x8_16x32x1_4x4_4x8_2x1)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 1
//      Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 32, 32, 32, 64x32x8_32x32x1_8x4_4x8_2x1)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 1
//      Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 64, 32, 64, 64x64x8_32x64x1_8x8_4x8_2x1)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp:  8 x 4
//    Warps / Block:  2 x 1
//      Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(128, 32, 64, 32, 128x32x8_64x32x1_8x8_8x4_2x1)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(16, 32, 8, 16, 16x32x8_8x16x1_2x2_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(16, 64, 8, 32, 16x64x8_8x32x1_2x4_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 32, 16, 16, 32x32x8_16x16x1_4x2_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 64, 16, 32, 32x64x8_16x32x1_4x4_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(32, 128, 16, 64, 32x128x8_16x64x1_4x8_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
//   Threads / Warp:  8 x 4
//    Warps / Block:  2 x 2
//      Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 32, 32, 16, 64x32x8_32x16x1_4x4_8x4_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 64, 32, 32, 64x64x8_32x32x1_8x4_4x8_2x2)
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
//   Threads / Warp:  4 x 8
//    Warps / Block:  2 x 2
//      Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T(64, 128, 32, 64, 64x128x8_32x64x1_8x8_4x8_2x2)
#endif

#undef CUASR_BENCH_MAXPLUS_SSRGEMM_NN_T

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
//   Threads / Warp:  8 x 4
//    Warps / Block:  2 x 2
//      Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));
  using precision = float;
  using OpClass = cutlass::arch::OpClassSimt;
  using SmArch = cutlass::arch::Sm50;

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
  using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;

  using AddOp = Config::AdditionOp;
  using MultOp = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                          //
      AddOp, MultOp,                                                   //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::ColumnMajor,                         //
      precision, cutlass::layout::RowMajor,                            //
      precision, OpClass, SmArch,                                      //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // 
benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; 
state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif 
////////////////////////////////////////////////////////////////////////////////

#ifndef CUASR_BENCH_SM50_MAXPLUS_SSRGEMM_NN_T_RUN_DEFINED
#define CUASR_BENCH_SM50_MAXPLUS_SSRGEMM_NN_T_RUN_DEFINED

// Shared body for every SM50 maximum-plus SSRGEMM (NN layout, row-major D)
// benchmark in this file.  The generated benchmarks differ only in their
// threadblock and warp tile shapes, so those are the template parameters;
// everything else — float precision, SIMT op class, Sm50 arch, 1x1x1
// instruction shape, column/column/row-major layouts, 2-stage pipeline, and
// the square N x N x N problem driven by state.range(0) — is identical.
//
// NOTE(review): the Flop/s counter keeps the generator's 2*N^3 accounting,
// even though the "multiply" here is the semiring operator — confirm if a
// different op count is desired.
template <typename ThreadblockShape, typename WarpShape>
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_run(benchmark::State &state) {
  const auto N = static_cast<int>(state.range(0));

  using precision        = float;
  using OpClass          = cutlass::arch::OpClassSimt;
  using SmArch           = cutlass::arch::Sm50;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration<  //
      precision, precision, precision, precision, OpClass,                    //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;

  using AddOp            = typename Config::AdditionOp;
  using MultOp           = typename Config::MultiplicationOp;
  using EpilogueOutputOp = typename Config::EpilogueOutputOp;

  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // setup bench harness for a square N x N x N problem
  cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });

  // benchmark loop; synchronize so each iteration includes kernel completion
  for (auto _ : state) {
    benchmark::DoNotOptimize(bench.run());
    cudaDeviceSynchronize();
  }

  double flops_per_itr = 2.0 * N * N * N;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}

#endif  // CUASR_BENCH_SM50_MAXPLUS_SSRGEMM_NN_T_RUN_DEFINED

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 2
//    Threads / Warp:  4 x 8
//     Warps / Block:  2 x 4
//       Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<32, 64, 8>,   // threadblock tile
      cutlass::gemm::GemmShape<16, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x8_16x16x1_4x2_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 4
//    Threads / Warp:  4 x 8
//     Warps / Block:  2 x 4
//       Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<32, 128, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<16, 32, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x8_16x32x1_4x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 8
//    Threads / Warp:  4 x 8
//     Warps / Block:  2 x 4
//       Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x256x8_16x64x1_4x8_4x8_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<32, 256, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<16, 64, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x256x8_16x64x1_4x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 4
//    Threads / Warp:  8 x 4
//     Warps / Block:  2 x 4
//       Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<64, 64, 8>,   // threadblock tile
      cutlass::gemm::GemmShape<32, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_32x16x1_4x4_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  8 x 4
//    Threads / Warp:  4 x 8
//     Warps / Block:  2 x 4
//       Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x32x1_8x4_4x8_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<64, 128, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<32, 32, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_32x32x1_8x4_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  8 x 8
//    Threads / Warp:  4 x 8
//     Warps / Block:  2 x 4
//       Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_32x64x1_8x8_4x8_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<64, 256, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<32, 64, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_32x64x1_8x8_4x8_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   8 x 8
//    Threads / Warp:   8 x 4
//     Warps / Block:   2 x 4
//       Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_64x32x1_8x8_8x4_2x4(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<128, 128, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<64, 32, 8>>(   // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_64x32x1_8x8_8x4_2x4)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  2 x 2
//    Threads / Warp:  4 x 8
//     Warps / Block:  4 x 2
//       Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<32, 32, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<8, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x32x8_8x16x1_2x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 2
//    Threads / Warp:  4 x 8
//     Warps / Block:  4 x 2
//       Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<64, 32, 8>,   // threadblock tile
      cutlass::gemm::GemmShape<16, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x8_16x16x1_4x2_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:  4 x 4
//    Threads / Warp:  4 x 8
//     Warps / Block:  4 x 2
//       Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<64, 64, 8>,   // threadblock tile
      cutlass::gemm::GemmShape<16, 32, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x32x1_4x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   4 x 4
//    Threads / Warp:   8 x 4
//     Warps / Block:   4 x 2
//       Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<128, 32, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<32, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x8_32x16x1_4x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   8 x 4
//    Threads / Warp:   4 x 8
//     Warps / Block:   4 x 2
//       Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x32x1_8x4_4x8_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<128, 64, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<32, 32, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x32x1_8x4_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   8 x 8
//    Threads / Warp:   4 x 8
//     Warps / Block:   4 x 2
//       Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x64x1_8x8_4x8_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<128, 128, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<32, 64, 8>>(   // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x64x1_8x8_4x8_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   8 x 4
//    Threads / Warp:   8 x 4
//     Warps / Block:   4 x 2
//       Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x32x8_64x16x1_8x4_8x4_4x2(
    benchmark::State &state) {
  BM_SM50_device_maximum_plus_ssrgemm_nn_t_run<
      cutlass::gemm::GemmShape<256, 32, 8>,  // threadblock tile
      cutlass::gemm::GemmShape<64, 16, 8>>(  // warp tile
      state);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x32x8_64x16x1_8x4_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread:   8 x  8
//    Threads / Warp:   8 x  4
//     Warps / Block:   4 x  2
//       Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
  // Problem size: square N x N x N GEMM taken from the benchmark range.
  const auto N = static_cast<int>(state.range(0));

  using precision = float;
  using OpClass   = cutlass::arch::OpClassSimt;
  using SmArch    = cutlass::arch::Sm50;

  // Tile decomposition for this instantiation (see header comment above).
  using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 32, 8>;
  using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;

  // Default (max, +) semi-ring configuration for this precision/arch.
  using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
      precision, precision, precision, precision, OpClass,                   //
      cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;

  using AddOp            = Config::AdditionOp;
  using MultOp           = Config::MultiplicationOp;
  using EpilogueOutputOp = Config::EpilogueOutputOp;

  // Column-major A and B, row-major D ("nn_t" layout combination).
  using Srgemm = cuasr::gemm::device::Srgemm<                           //
      AddOp, MultOp,                                                    //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::ColumnMajor,                          //
      precision, cutlass::layout::RowMajor,                             //
      precision, OpClass, SmArch,                                       //
      ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp,  //
      cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

  // Harness owns the device buffers and runs the configured SRGEMM.
  cuasr::bench::device::BenchHarness<Srgemm> harness({ N, N, N });

  // Timed loop: launch, keep the result live, wait for device completion so
  // the iteration time covers the full kernel execution.
  for (auto _ : state) {
    benchmark::DoNotOptimize(harness.run());
    cudaDeviceSynchronize();
  }

  // Report throughput; the per-iteration flop count is invariant across
  // iterations, hence kIsIterationInvariantRate.
  double flop_count = 2.0 * N * N * N;
  state.counters["Flop/s"]
      = benchmark::Counter(flop_count, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x32x1_8x8_8x4_4x2)
    ->RangeMultiplier(2)->Range(256, 4096);
#endif

////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 //
Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void 
BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using 
precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; 
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename 
cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, 
SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, 
cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::ColumnMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // 
benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_maximum_plus_ssrgemm_nn_t_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
1d8216e6a6a7e34953f329ade86594efa66c6799.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <iostream>
#include<Windows.h>
using namespace std;
#pragma comment( lib,"winmm.lib" )

// Abort with a diagnostic if a HIP runtime call fails. Kernel launches are
// asynchronous, so without this an error would surface silently at some
// later, unrelated call.
#define HIP_CHECK(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,      \
                    hipGetErrorString(err_));                                 \
            abort();                                                          \
        }                                                                     \
    } while (0)

// Element-wise vector add: C[i] = A[i] + B[i] for i < n.
// 1-D launch; the bounds guard makes any grid size safe (the original had
// none and relied on n being an exact multiple of the block size).
__global__ void Plus(float A[], float B[], float C[], int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        C[i] = A[i] + B[i];
    }
}

// 1-D vector addition over 1M floats; prints accumulated error and wall time.
// (~183ms end to end on the original machine.)
void test1()
{
    DWORD start, end;
    start = timeGetTime();

    float* A, * Ad, * B, * Bd, * C, * Cd;
    int n = 1024 * 1024;
    size_t size = n * sizeof(float);

    // Host allocations.
    A = (float*)malloc(size);
    B = (float*)malloc(size);
    C = (float*)malloc(size);

    // Initialize inputs so every element of the sum is exactly 100.
    for (int i = 0; i < n; i++)
    {
        A[i] = 90.0;
        B[i] = 10.0;
    }

    // Device allocations.
    HIP_CHECK(hipMalloc((void**)&Ad, size));
    HIP_CHECK(hipMalloc((void**)&Bd, size));
    HIP_CHECK(hipMalloc((void**)&Cd, size));

    // Copy inputs host -> device. (The original copied B twice; once suffices.)
    HIP_CHECK(hipMemcpy(Ad, A, size, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(Bd, B, size, hipMemcpyHostToDevice));

    // Launch config: 1024*1024/512 blocks of 512 threads each.
    dim3 dimBlock(512);
    dim3 dimGrid(n / 512);

    Plus << <dimGrid, dimBlock >> > (Ad, Bd, Cd, n);
    HIP_CHECK(hipGetLastError());   // catch bad-launch errors immediately

    // Blocking copy device -> host; also synchronizes with the kernel.
    HIP_CHECK(hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost));

    // Accumulate the absolute error against the expected value 100.
    float max_error = 0.0;
    for (int i = 0; i < n; i++)
    {
        max_error += fabs(100.0 - C[i]);
    }
    cout << "max error is " << max_error << endl;

    // Release host and device memory.
    free(A);
    free(B);
    free(C);
    HIP_CHECK(hipFree(Ad));
    HIP_CHECK(hipFree(Bd));
    HIP_CHECK(hipFree(Cd));

    end = timeGetTime();
    cout << "total time is " << (end - start) << "ms" << endl;
}

// 2-D element-wise add over a 1024x1024 grid addressed through device-side
// row-pointer tables.
__global__ void addKernel(int** C, int** A, int** B)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int idy = threadIdx.y + blockDim.y * blockIdx.y;
    if (idx < 1024 && idy < 1024)
    {
        C[idy][idx] = A[idy][idx] + B[idy][idx];
    }
}

// Matrix addition via device row-pointer arrays; prints launch geometry,
// accumulated error, and wall time. (~14ms on the original machine.)
void test2()
{
    DWORD start, end;
    int Row = 1024;
    int Col = 1024;
    start = timeGetTime();

    // Host-side row-pointer tables (they will hold DEVICE addresses) and
    // flat host buffers for the actual data.
    int** A = (int**)malloc(sizeof(int*) * Row);
    int** B = (int**)malloc(sizeof(int*) * Row);
    int** C = (int**)malloc(sizeof(int*) * Row);
    int* dataA = (int*)malloc(sizeof(int) * Row * Col);
    int* dataB = (int*)malloc(sizeof(int) * Row * Col);
    int* dataC = (int*)malloc(sizeof(int) * Row * Col);

    int** d_A;
    int** d_B;
    int** d_C;
    int* d_dataA;
    int* d_dataB;
    int* d_dataC;

    // Device memory: row-pointer tables plus flat data buffers.
    HIP_CHECK(hipMalloc((void**)&d_A, sizeof(int**) * Row));
    HIP_CHECK(hipMalloc((void**)&d_B, sizeof(int**) * Row));
    HIP_CHECK(hipMalloc((void**)&d_C, sizeof(int**) * Row));
    HIP_CHECK(hipMalloc((void**)&d_dataA, sizeof(int) * Row * Col));
    HIP_CHECK(hipMalloc((void**)&d_dataB, sizeof(int) * Row * Col));
    HIP_CHECK(hipMalloc((void**)&d_dataC, sizeof(int) * Row * Col));

    // Fill inputs so each output element is exactly 100.
    for (int i = 0; i < Row * Col; i++)
    {
        dataA[i] = 90;
        dataB[i] = 10;
    }

    // Each table entry points at the start of a DEVICE row; once the tables
    // are copied to the device the kernel can double-dereference them.
    for (int i = 0; i < Row; i++)
    {
        A[i] = d_dataA + Col * i;
        B[i] = d_dataB + Col * i;
        C[i] = d_dataC + Col * i;
    }

    HIP_CHECK(hipMemcpy(d_A, A, sizeof(int*) * Row, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(d_B, B, sizeof(int*) * Row, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(d_C, C, sizeof(int*) * Row, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(d_dataA, dataA, sizeof(int) * Row * Col, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(d_dataB, dataB, sizeof(int) * Row * Col, hipMemcpyHostToDevice));

    // 16x16 thread blocks; ceil-divide so any Row/Col is covered.
    dim3 threadPerBlock(16, 16);
    dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y);
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    addKernel << <blockNumber, threadPerBlock >> > (d_C, d_A, d_B);
    HIP_CHECK(hipGetLastError());   // catch bad-launch errors immediately

    // Copy the result back; the blocking copy synchronizes with the kernel.
    HIP_CHECK(hipMemcpy(dataC, d_dataC, sizeof(int) * Row * Col, hipMemcpyDeviceToHost));

    int max_error = 0;
    for (int i = 0; i < Row * Col; i++)
    {
        //printf("%d\n", dataC[i]);
        max_error += abs(100 - dataC[i]);
    }

    // Release host and device memory.
    free(A);
    free(B);
    free(C);
    free(dataA);
    free(dataB);
    free(dataC);
    HIP_CHECK(hipFree(d_A));
    HIP_CHECK(hipFree(d_B));
    HIP_CHECK(hipFree(d_C));
    HIP_CHECK(hipFree(d_dataA));
    HIP_CHECK(hipFree(d_dataB));
    HIP_CHECK(hipFree(d_dataC));

    printf("max_error is %d\n", max_error);
    end = timeGetTime();
    cout << "total time is " << (end - start) << "ms" << endl;
}

// Naive dense matrix multiply P = M * N for square matrices of side `width`,
// one thread per output element. Bounds guard added so the kernel is safe
// for grids that overshoot `width` (a no-op at the exact sizes used here).
__global__ void matrix_mul_gpu(int* M, int* N, int* P, int width)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;  // output column
    int j = threadIdx.y + blockDim.y * blockIdx.y;  // output row
    if (i >= width || j >= width)
        return;
    int sum = 0;
    for (int k = 0; k < width; k++)
    {
        int a = M[j * width + k];
        int b = N[k * width + i];
        sum += a * b;
    }
    P[j * width + i] = sum;
}

// 1024x1024 integer matrix multiply benchmark; prints launch geometry and
// wall time. (~234ms on the original machine.) NOTE(review): the result in
// C is never validated here — by design in the original; kept as-is.
void test3()
{
    DWORD start, end;
    int Row = 1024;
    int Col = 1024;
    start = timeGetTime();

    int* A = (int*)malloc(sizeof(int) * Row * Col);
    int* B = (int*)malloc(sizeof(int) * Row * Col);
    int* C = (int*)malloc(sizeof(int) * Row * Col);

    // Device allocations.
    int* d_dataA, * d_dataB, * d_dataC;
    HIP_CHECK(hipMalloc((void**)&d_dataA, sizeof(int) * Row * Col));
    HIP_CHECK(hipMalloc((void**)&d_dataB, sizeof(int) * Row * Col));
    HIP_CHECK(hipMalloc((void**)&d_dataC, sizeof(int) * Row * Col));

    // Initialize inputs.
    for (int i = 0; i < Row * Col; i++)
    {
        A[i] = 90;
        B[i] = 10;
    }

    HIP_CHECK(hipMemcpy(d_dataA, A, sizeof(int) * Row * Col, hipMemcpyHostToDevice));
    HIP_CHECK(hipMemcpy(d_dataB, B, sizeof(int) * Row * Col, hipMemcpyHostToDevice));

    // 16x16 thread blocks; ceil-divide so any Row/Col is covered.
    dim3 threadPerBlock(16, 16);
    dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y);
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    matrix_mul_gpu << <blockNumber, threadPerBlock >> > (d_dataA, d_dataB, d_dataC, Col);
    HIP_CHECK(hipGetLastError());   // catch bad-launch errors immediately

    // Copy the result back; the blocking copy synchronizes with the kernel.
    HIP_CHECK(hipMemcpy(C, d_dataC, sizeof(int) * Row * Col, hipMemcpyDeviceToHost));

    // Release host and device memory.
    free(A);
    free(B);
    free(C);
    HIP_CHECK(hipFree(d_dataA));
    HIP_CHECK(hipFree(d_dataB));
    HIP_CHECK(hipFree(d_dataC));

    end = timeGetTime();
    cout << "total time is " << (end - start) << "ms" << endl;
}

int main()
{
    test1();
    test2();
    test3();
    return 0;
}
1d8216e6a6a7e34953f329ade86594efa66c6799.cu
#include "cuda_runtime.h" #include <stdlib.h> #include <iostream> #include<Windows.h> using namespace std; #pragma comment( lib,"winmm.lib" ) __global__ void Plus(float A[], float B[], float C[], int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; C[i] = A[i] + B[i]; } //183ms void test1() { DWORD start, end; start = timeGetTime(); float* A, * Ad, * B, * Bd, * C, * Cd; int n = 1024 * 1024; int size = n * sizeof(float); // CPU端分配内存 A = (float*)malloc(size); B = (float*)malloc(size); C = (float*)malloc(size); // 初始化数组 for (int i = 0; i < n; i++) { A[i] = 90.0; B[i] = 10.0; } // GPU端分配内存 cudaMalloc((void**)&Ad, size); cudaMalloc((void**)&Bd, size); cudaMalloc((void**)&Cd, size); // CPU的数据拷贝到GPU端 cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); // 定义kernel执行配置,(1024*1024/512)个block,每个block里面有512个线程 dim3 dimBlock(512); dim3 dimGrid(n / 512); // 执行kernel Plus << <dimGrid, dimBlock >> > (Ad, Bd, Cd, n); // 将在GPU端计算好的结果拷贝回CPU端 cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost); // 校验误差 float max_error = 0.0; for (int i = 0; i < n; i++) { max_error += fabs(100.0 - C[i]); } cout << "max error is " << max_error << endl; // 释放CPU端、GPU端的内存 free(A); free(B); free(C); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); end = timeGetTime(); cout << "total time is " << (end - start) << "ms" << endl; } __global__ void addKernel(int** C, int** A, int** B) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; if (idx < 1024 && idy < 1024) { C[idy][idx] = A[idy][idx] + B[idy][idx]; } } //14ms void test2() { DWORD start, end; int Row = 1024; int Col = 1024; start = timeGetTime(); int** A = (int**)malloc(sizeof(int*) * Row); int** B = (int**)malloc(sizeof(int*) * Row); int** C = (int**)malloc(sizeof(int*) * Row); int* dataA = (int*)malloc(sizeof(int) * Row * Col); int* dataB = (int*)malloc(sizeof(int) * Row * Col); int* dataC = (int*)malloc(sizeof(int) * 
Row * Col); int** d_A; int** d_B; int** d_C; int* d_dataA; int* d_dataB; int* d_dataC; //malloc device memory cudaMalloc((void**)&d_A, sizeof(int**) * Row); cudaMalloc((void**)&d_B, sizeof(int**) * Row); cudaMalloc((void**)&d_C, sizeof(int**) * Row); cudaMalloc((void**)&d_dataA, sizeof(int) * Row * Col); cudaMalloc((void**)&d_dataB, sizeof(int) * Row * Col); cudaMalloc((void**)&d_dataC, sizeof(int) * Row * Col); //set value for (int i = 0; i < Row * Col; i++) { dataA[i] = 90; dataB[i] = 10; } //将主机指针A指向设备数据位置,目的是让设备二级指针能够指向设备数据一级指针 //A 和 dataA 都传到了设备上,但是二者还没有建立对应关系 for (int i = 0; i < Row; i++) { A[i] = d_dataA + Col * i; B[i] = d_dataB + Col * i; C[i] = d_dataC + Col * i; } cudaMemcpy(d_A, A, sizeof(int*) * Row, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, sizeof(int*) * Row, cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, sizeof(int*) * Row, cudaMemcpyHostToDevice); cudaMemcpy(d_dataA, dataA, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); cudaMemcpy(d_dataB, dataB, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); dim3 threadPerBlock(16, 16); dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y); printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y); addKernel << <blockNumber, threadPerBlock >> > (d_C, d_A, d_B); //拷贝计算数据-一级数据指针 cudaMemcpy(dataC, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost); int max_error = 0; for (int i = 0; i < Row * Col; i++) { //printf("%d\n", dataC[i]); max_error += abs(100 - dataC[i]); } //释放内存 free(A); free(B); free(C); free(dataA); free(dataB); free(dataC); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(d_dataA); cudaFree(d_dataB); cudaFree(d_dataC); printf("max_error is %d\n", max_error); end = timeGetTime(); cout << "total time is " << (end - start) << "ms" << endl; } __global__ void matrix_mul_gpu(int* M, int* N, int* P, int width) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + 
blockDim.y * blockIdx.y; int sum = 0; for (int k = 0; k < width; k++) { int a = M[j * width + k]; int b = N[k * width + i]; sum += a * b; } P[j * width + i] = sum; } //234ms void test3() { DWORD start, end; int Row = 1024; int Col = 1024; start = timeGetTime(); int* A = (int*)malloc(sizeof(int) * Row * Col); int* B = (int*)malloc(sizeof(int) * Row * Col); int* C = (int*)malloc(sizeof(int) * Row * Col); //malloc device memory int* d_dataA, * d_dataB, * d_dataC; cudaMalloc((void**)&d_dataA, sizeof(int) * Row * Col); cudaMalloc((void**)&d_dataB, sizeof(int) * Row * Col); cudaMalloc((void**)&d_dataC, sizeof(int) * Row * Col); //set value for (int i = 0; i < Row * Col; i++) { A[i] = 90; B[i] = 10; } cudaMemcpy(d_dataA, A, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); cudaMemcpy(d_dataB, B, sizeof(int) * Row * Col, cudaMemcpyHostToDevice); dim3 threadPerBlock(16, 16); dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y); printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y); matrix_mul_gpu << <blockNumber, threadPerBlock >> > (d_dataA, d_dataB, d_dataC, Col); //拷贝计算数据-一级数据指针 cudaMemcpy(C, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost); //释放内存 free(A); free(B); free(C); cudaFree(d_dataA); cudaFree(d_dataB); cudaFree(d_dataC); end = timeGetTime(); cout << "total time is " << (end - start) << "ms" << endl; } int main() { test1(); test2(); test3(); return 0; }
52062ceb610fb396e1f9485886de4b84a457b249.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> //#include <pcl/gpu/utils/device/block.hpp> namespace pcl { namespace device { namespace kinfuLS { __device__ unsigned int count = 0; struct CorespSearch { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ int operator () (const int &lhs, const volatile int& rhs) const { return lhs + rhs; } }; PtrStep<float> vmap_g_curr; PtrStep<float> nmap_g_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; mutable PtrStepSz<short2> coresp; mutable int* gbuf; __device__ __forceinline__ int search () const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= coresp.cols || y >= coresp.rows) return 0; coresp.ptr (y)[x] = make_short2 (-1, -1); float3 ncurr_g; ncurr_g.x = nmap_g_curr.ptr (y)[x]; if (isnan (ncurr_g.x)) return 0; float3 vcurr_g; vcurr_g.x = vmap_g_curr.ptr (y )[x]; vcurr_g.y = vmap_g_curr.ptr (y + coresp.rows)[x]; vcurr_g.z = vmap_g_curr.ptr (y + 2 * coresp.rows)[x]; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= coresp.cols || ukr.y >= coresp.rows) return 0; float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return 0; float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float dist = norm (vcurr_g - vprev_g); if (dist > distThres) return 0; ncurr_g.y = nmap_g_curr.ptr (y + coresp.rows)[x]; ncurr_g.z = nmap_g_curr.ptr (y + 2 * coresp.rows)[x]; nprev_g.y = nmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; 
nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); /*if (sine >= 1 || asinf(sine) >= angleThres) return 0;*/ if (/*sine >= 1 || */ sine >= angleThres) return 0; coresp.ptr (y)[x] = make_short2 (ukr.x, ukr.y); return 1; } __device__ __forceinline__ void reduce (int i) const { __shared__ volatile int smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); smem[tid] = i; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); __shared__ bool isLastBlockDone; if (tid == 0) { gbuf[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; __threadfence (); unsigned int value = atomicInc (&count, gridDim.x * gridDim.y); isLastBlockDone = (value == (gridDim.x * gridDim.y - 1)); } __syncthreads (); if (isLastBlockDone) { int sum = 0; int stride = Block::stride (); for (int pos = tid; pos < gridDim.x * gridDim.y; pos += stride) sum += gbuf[pos]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) { gbuf[0] = smem[0]; count = 0; } } } __device__ __forceinline__ void operator () () const { int mask = search (); //reduce(mask); if uncomment -> need to allocate and set gbuf } }; __global__ void corespKernel (const CorespSearch cs) { cs (); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp) { CorespSearch cs; cs.vmap_g_curr = vmap_g_curr; cs.nmap_g_curr = nmap_g_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.coresp = coresp; dim3 block (CorespSearch::CTA_SIZE_X, CorespSearch::CTA_SIZE_Y); dim3 grid (divUp (coresp.cols, block.x), divUp (coresp.rows, 
block.y)); hipLaunchKernelGGL(( corespKernel), dim3(grid), dim3(block), 0, 0, cs); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } }
52062ceb610fb396e1f9485886de4b84a457b249.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> //#include <pcl/gpu/utils/device/block.hpp> namespace pcl { namespace device { namespace kinfuLS { __device__ unsigned int count = 0; struct CorespSearch { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ int operator () (const int &lhs, const volatile int& rhs) const { return lhs + rhs; } }; PtrStep<float> vmap_g_curr; PtrStep<float> nmap_g_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; mutable PtrStepSz<short2> coresp; mutable int* gbuf; __device__ __forceinline__ int search () const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= coresp.cols || y >= coresp.rows) return 0; coresp.ptr (y)[x] = make_short2 (-1, -1); float3 ncurr_g; ncurr_g.x = nmap_g_curr.ptr (y)[x]; if (isnan (ncurr_g.x)) return 0; float3 vcurr_g; vcurr_g.x = vmap_g_curr.ptr (y )[x]; vcurr_g.y = vmap_g_curr.ptr (y + coresp.rows)[x]; vcurr_g.z = vmap_g_curr.ptr (y + 2 * coresp.rows)[x]; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= coresp.cols || ukr.y >= coresp.rows) return 0; float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return 0; float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float dist = norm (vcurr_g - vprev_g); if (dist > distThres) return 0; ncurr_g.y = nmap_g_curr.ptr (y + coresp.rows)[x]; ncurr_g.z = nmap_g_curr.ptr (y + 2 * coresp.rows)[x]; nprev_g.y = nmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; 
nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); /*if (sine >= 1 || asinf(sine) >= angleThres) return 0;*/ if (/*sine >= 1 || */ sine >= angleThres) return 0; coresp.ptr (y)[x] = make_short2 (ukr.x, ukr.y); return 1; } __device__ __forceinline__ void reduce (int i) const { __shared__ volatile int smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); smem[tid] = i; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); __shared__ bool isLastBlockDone; if (tid == 0) { gbuf[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; __threadfence (); unsigned int value = atomicInc (&count, gridDim.x * gridDim.y); isLastBlockDone = (value == (gridDim.x * gridDim.y - 1)); } __syncthreads (); if (isLastBlockDone) { int sum = 0; int stride = Block::stride (); for (int pos = tid; pos < gridDim.x * gridDim.y; pos += stride) sum += gbuf[pos]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) { gbuf[0] = smem[0]; count = 0; } } } __device__ __forceinline__ void operator () () const { int mask = search (); //reduce(mask); if uncomment -> need to allocate and set gbuf } }; __global__ void corespKernel (const CorespSearch cs) { cs (); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp) { CorespSearch cs; cs.vmap_g_curr = vmap_g_curr; cs.nmap_g_curr = nmap_g_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.coresp = coresp; dim3 block (CorespSearch::CTA_SIZE_X, CorespSearch::CTA_SIZE_Y); dim3 grid (divUp (coresp.cols, block.x), divUp (coresp.rows, 
block.y)); corespKernel<<<grid, block>>>(cs); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } }
44534e87e37ea17076b53c2a946c7466693ed89a.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <iostream> #include <math.h> // GPU Kernel __global__ void subtractVectorGPUKernel( float* ad, float* bd, float* cd, int size ){ // Retrieve our coordinates in the block int tx = blockIdx.x * blockDim.x + threadIdx.x; // Perform if(tx<size){ cd[tx]=ad[tx] - bd[tx]; } } bool subtractVectorGPU( float* a, float* b, float* c, int size ){ // Error return value hipError_t status; // Number of bytes in the matrix. int bytes = size * sizeof(float); // Pointers to the device arrays float *ad, *bd, *cd; // Allocate memory on the device to store each matrix hipHostGetDevicePointer( (void**)&ad, a, 0 ); hipHostGetDevicePointer( (void**)&bd, b, 0 ); hipHostGetDevicePointer( (void**)&cd, c, 0 ); // Specify the size of the grid and the size of the block float dimBlock= 1024; float x = (size/dimBlock); int dimGrid = (int)ceil(x); // Launch the kernel on a size-by-size block of threads hipLaunchKernelGGL(( subtractVectorGPUKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd, size); // Wait for completion hipDeviceSynchronize(); // Check for errors status = hipGetLastError(); if (status != hipSuccess) { std::cout << "Kernel failed: " << hipGetErrorString(status) << std::endl; return false; } // Success return true; }
44534e87e37ea17076b53c2a946c7466693ed89a.cu
#include "common.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <iostream> #include <math.h> // GPU Kernel __global__ void subtractVectorGPUKernel( float* ad, float* bd, float* cd, int size ){ // Retrieve our coordinates in the block int tx = blockIdx.x * blockDim.x + threadIdx.x; // Perform if(tx<size){ cd[tx]=ad[tx] - bd[tx]; } } bool subtractVectorGPU( float* a, float* b, float* c, int size ){ // Error return value cudaError_t status; // Number of bytes in the matrix. int bytes = size * sizeof(float); // Pointers to the device arrays float *ad, *bd, *cd; // Allocate memory on the device to store each matrix cudaHostGetDevicePointer( (void**)&ad, a, 0 ); cudaHostGetDevicePointer( (void**)&bd, b, 0 ); cudaHostGetDevicePointer( (void**)&cd, c, 0 ); // Specify the size of the grid and the size of the block float dimBlock= 1024; float x = (size/dimBlock); int dimGrid = (int)ceil(x); // Launch the kernel on a size-by-size block of threads subtractVectorGPUKernel<<<dimGrid, dimBlock>>>(ad, bd, cd, size); // Wait for completion cudaThreadSynchronize(); // Check for errors status = cudaGetLastError(); if (status != cudaSuccess) { std::cout << "Kernel failed: " << cudaGetErrorString(status) << std::endl; return false; } // Success return true; }
992c2c0e0ec8d50ab0e1a49b4598d1bec5f92241.hip
// !!! This is a file automatically generated by hipify!!! #include "HestonCallFFTGPU.hpp" #include "HestonCUDA.hpp" #include <complex> #define _USE_MATH_DEFINES #include <cmath> #include <gsl/gsl_spline.h> #include <iostream> // NVIDIA CUDA Headers #include <hip/hip_runtime.h> #include <hip/hip_complex.h> // NVIDIA Thrust Headers (http://developer.nvidia.com/Thrust) #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> // NVIDIA CUFFT #include <hipfft.h> __host__ __device__ static __inline__ HestonCUDAPrecisionComplex simpsonWIndex(int index) { index &= 3; switch (index) { case 0: return make_complex(0.0, -1.0); case 1: return make_complex(-1.0, 0.0); case 2: return make_complex(0.0, 1.0); case 3: return make_complex(1.0, 0.0); } return make_complex(0.0, 0.0); } struct HestonCallFFTGPU_functor { HestonCUDAPrecision dKappa; HestonCUDAPrecision dTheta; HestonCUDAPrecision dSigma; HestonCUDAPrecision dRho; HestonCUDAPrecision dV0; HestonCUDAPrecision dR; HestonCUDAPrecision dT; HestonCUDAPrecision dS0; HestonCUDAPrecision dStrike; HestonCUDAPrecision dX0; HestonCUDAPrecision dAlpha; HestonCUDAPrecision dEta; HestonCUDAPrecision dB; HestonCallFFTGPU_functor( HestonCUDAPrecision dKappa, // rate of reversion HestonCUDAPrecision dTheta, // int run variance HestonCUDAPrecision dSigma, // vol of vol HestonCUDAPrecision dRho, // correlation HestonCUDAPrecision dV0, // initial variance HestonCUDAPrecision dR, // instantaneous short rate HestonCUDAPrecision dT, // time till maturity HestonCUDAPrecision dS0, // initial asset price HestonCUDAPrecision dStrike, HestonCUDAPrecision dX0, HestonCUDAPrecision dAlpha, HestonCUDAPrecision dEta, HestonCUDAPrecision dB ) : dKappa(dKappa), dTheta(dTheta), dSigma(dSigma), dRho(dRho), dV0(dV0), dR(dR), dT(dT), dS0(dS0), dStrike(dStrike), dX0(dX0), dAlpha(dAlpha), dEta(dEta), dB(dB) {} __host__ __device__ HestonCUDAPrecisionComplex operator() (int index) { 
HestonCUDAPrecisionComplex zI = make_complex(0.0, 1.0); HestonCUDAPrecision dU = index * dEta; HestonCUDAPrecisionComplex zV = make_complex(dU, -(dAlpha + 1.0)); HestonCUDAPrecisionComplex zZeta = mul(-0.5, add(mul(zV, zV), mul(zI, zV))); HestonCUDAPrecisionComplex zGamma = sub(dKappa, mul(dRho * dSigma, mul(zV, zI))); HestonCUDAPrecisionComplex zPHI = sqrt(sub(mul(zGamma, zGamma), mul(2.0 * dSigma * dSigma, zZeta))); HestonCUDAPrecisionComplex zA = mul(dX0 + dR * dT, mul(zI, zV)); HestonCUDAPrecisionComplex zB = mul(dV0, div(mul(2.0, mul(zZeta, sub(1.0, exp(mul(-dT, zPHI))))), sub(mul(2.0, zPHI), mul(sub(zPHI, zGamma), sub(1.0, exp(mul(-dT, zPHI))))))); HestonCUDAPrecisionComplex zC = mul(-dKappa * dTheta / (dSigma * dSigma), add(mul(2.0, log(div(sub(mul(2.0, zPHI), mul(sub(zPHI, zGamma), sub(1.0, exp(mul(-dT, zPHI))))), (mul(2.0, zPHI))))), mul(dT, sub(zPHI, zGamma)))); HestonCUDAPrecisionComplex zCharFunc = exp(add(add(zA, zB), zC)); HestonCUDAPrecisionComplex zModifiedCharFunc = div(mul(exp(-dR * dT), zCharFunc), add(dAlpha * dAlpha + dAlpha - dU * dU, make_complex(0.0, dU * (2.0 * dAlpha + 1.0)))); HestonCUDAPrecisionComplex zSimpsonW = mul(1.0 / 3.0, add(3.0, simpsonWIndex(index))); if (index == 0) zSimpsonW.x -= 1.0 / 3.0; return mul(dEta, mul(mul(exp(make_complex(0.0, dB * dU)), zModifiedCharFunc), zSimpsonW)); } }; HestonCUDAPrecision HestonCallFFTGPU( HestonCUDAPrecision dKappa, // rate of reversion HestonCUDAPrecision dTheta, // int run variance HestonCUDAPrecision dSigma, // vol of vol HestonCUDAPrecision dRho, // correlation HestonCUDAPrecision dV0, // initial variance HestonCUDAPrecision dR, // instantaneous short rate HestonCUDAPrecision dT, // time till maturity HestonCUDAPrecision dS0, // initial asset price HestonCUDAPrecision dStrike, long lN) { std::complex<HestonCUDAPrecision> zI(0.0, 1.0); HestonCUDAPrecision dX0 = log(dS0); HestonCUDAPrecision dAlpha = 1.5; // HestonCUDAPrecision dC = 600; HestonCUDAPrecision dEta = 0.25; HestonCUDAPrecision 
dB = M_PI / dEta; std::complex<HestonCUDAPrecision> zFFTFunc[lN]; std::complex<HestonCUDAPrecision> zPayoff[lN]; HestonCUDAPrecision dPayoff[lN]; HestonCUDAPrecision dLambda = 2 * dB / lN; HestonCUDAPrecision dPosition = (log(dStrike) + dB) / dLambda + 1; thrust::device_vector<int> dev_zFFTFuncI(lN); thrust::device_vector<HestonCUDAPrecisionComplex> dev_zFFTFunc(lN); thrust::sequence(dev_zFFTFuncI.begin(), dev_zFFTFuncI.end()); thrust::transform(dev_zFFTFuncI.begin(), dev_zFFTFuncI.end(), dev_zFFTFunc.begin(), HestonCallFFTGPU_functor(dKappa, dTheta, dSigma, dRho, dV0, dR, dT, dS0, dStrike, dX0, dAlpha, dEta, dB)); thrust::copy(dev_zFFTFunc.begin(), dev_zFFTFunc.end(), (HestonCUDAPrecisionComplex*)zFFTFunc); hipfftHandle p; HestonCUDAPrecisionComplex* cufftFFTFunc = NULL; HestonCUDAPrecisionComplex* cufftPayoff = NULL; hipMalloc((void**)&cufftFFTFunc, sizeof(HestonCUDAPrecisionComplex) * lN); hipMalloc((void**)&cufftPayoff, sizeof(HestonCUDAPrecisionComplex) * lN); hipMemcpy(cufftFFTFunc, zFFTFunc, sizeof(HestonCUDAPrecisionComplex) * lN, hipMemcpyHostToDevice); #if defined HestonCUDAPrecisionFloat hipfftPlan1d(&p, lN, HIPFFT_C2C, 1); hipfftExecC2C(p, cufftFFTFunc, cufftPayoff, HIPFFT_FORWARD); #elif defined HestonCUDAPrecisionDouble hipfftPlan1d(&p, lN, HIPFFT_Z2Z, 1); hipfftExecZ2Z(p, cufftFFTFunc, cufftPayoff, HIPFFT_FORWARD); #endif hipMemcpy(zPayoff, cufftPayoff, sizeof(HestonCUDAPrecisionComplex) * lN, hipMemcpyDeviceToHost); hipfftDestroy(p); hipFree(cufftFFTFunc); hipFree(cufftPayoff); for (int i = 0; i < lN; i++) dPayoff[i] = zPayoff[i].real(); double dCallValueM[lN]; /* wchan: replace this later w/ the appropriate BLAS vector-scalar function */ for (int i = 0; i < lN; i++) dCallValueM[i] = static_cast<double>(dPayoff[i]) / M_PI; double dLin[lN]; for (int i = 0; i < lN; i++) dLin[i] = 1.0 + i; gsl_interp_accel* acc = gsl_interp_accel_alloc(); gsl_spline* spline = gsl_spline_alloc(gsl_interp_cspline, lN); gsl_spline_init(spline, dLin, dCallValueM, lN); 
HestonCUDAPrecision dPrice = exp(-log(dStrike) * dAlpha) * gsl_spline_eval(spline, dPosition, acc); gsl_spline_free(spline); gsl_interp_accel_free(acc); return dPrice; }
992c2c0e0ec8d50ab0e1a49b4598d1bec5f92241.cu
#include "HestonCallFFTGPU.hpp" #include "HestonCUDA.hpp" #include <complex> #define _USE_MATH_DEFINES #include <cmath> #include <gsl/gsl_spline.h> #include <iostream> // NVIDIA CUDA Headers #include <cuda.h> #include <cuComplex.h> // NVIDIA Thrust Headers (http://developer.nvidia.com/Thrust) #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> // NVIDIA CUFFT #include <cufft.h> __host__ __device__ static __inline__ HestonCUDAPrecisionComplex simpsonWIndex(int index) { index &= 3; switch (index) { case 0: return make_complex(0.0, -1.0); case 1: return make_complex(-1.0, 0.0); case 2: return make_complex(0.0, 1.0); case 3: return make_complex(1.0, 0.0); } return make_complex(0.0, 0.0); } struct HestonCallFFTGPU_functor { HestonCUDAPrecision dKappa; HestonCUDAPrecision dTheta; HestonCUDAPrecision dSigma; HestonCUDAPrecision dRho; HestonCUDAPrecision dV0; HestonCUDAPrecision dR; HestonCUDAPrecision dT; HestonCUDAPrecision dS0; HestonCUDAPrecision dStrike; HestonCUDAPrecision dX0; HestonCUDAPrecision dAlpha; HestonCUDAPrecision dEta; HestonCUDAPrecision dB; HestonCallFFTGPU_functor( HestonCUDAPrecision dKappa, // rate of reversion HestonCUDAPrecision dTheta, // int run variance HestonCUDAPrecision dSigma, // vol of vol HestonCUDAPrecision dRho, // correlation HestonCUDAPrecision dV0, // initial variance HestonCUDAPrecision dR, // instantaneous short rate HestonCUDAPrecision dT, // time till maturity HestonCUDAPrecision dS0, // initial asset price HestonCUDAPrecision dStrike, HestonCUDAPrecision dX0, HestonCUDAPrecision dAlpha, HestonCUDAPrecision dEta, HestonCUDAPrecision dB ) : dKappa(dKappa), dTheta(dTheta), dSigma(dSigma), dRho(dRho), dV0(dV0), dR(dR), dT(dT), dS0(dS0), dStrike(dStrike), dX0(dX0), dAlpha(dAlpha), dEta(dEta), dB(dB) {} __host__ __device__ HestonCUDAPrecisionComplex operator() (int index) { HestonCUDAPrecisionComplex zI = make_complex(0.0, 1.0); HestonCUDAPrecision dU = index * 
dEta; HestonCUDAPrecisionComplex zV = make_complex(dU, -(dAlpha + 1.0)); HestonCUDAPrecisionComplex zZeta = mul(-0.5, add(mul(zV, zV), mul(zI, zV))); HestonCUDAPrecisionComplex zGamma = sub(dKappa, mul(dRho * dSigma, mul(zV, zI))); HestonCUDAPrecisionComplex zPHI = sqrt(sub(mul(zGamma, zGamma), mul(2.0 * dSigma * dSigma, zZeta))); HestonCUDAPrecisionComplex zA = mul(dX0 + dR * dT, mul(zI, zV)); HestonCUDAPrecisionComplex zB = mul(dV0, div(mul(2.0, mul(zZeta, sub(1.0, exp(mul(-dT, zPHI))))), sub(mul(2.0, zPHI), mul(sub(zPHI, zGamma), sub(1.0, exp(mul(-dT, zPHI))))))); HestonCUDAPrecisionComplex zC = mul(-dKappa * dTheta / (dSigma * dSigma), add(mul(2.0, log(div(sub(mul(2.0, zPHI), mul(sub(zPHI, zGamma), sub(1.0, exp(mul(-dT, zPHI))))), (mul(2.0, zPHI))))), mul(dT, sub(zPHI, zGamma)))); HestonCUDAPrecisionComplex zCharFunc = exp(add(add(zA, zB), zC)); HestonCUDAPrecisionComplex zModifiedCharFunc = div(mul(exp(-dR * dT), zCharFunc), add(dAlpha * dAlpha + dAlpha - dU * dU, make_complex(0.0, dU * (2.0 * dAlpha + 1.0)))); HestonCUDAPrecisionComplex zSimpsonW = mul(1.0 / 3.0, add(3.0, simpsonWIndex(index))); if (index == 0) zSimpsonW.x -= 1.0 / 3.0; return mul(dEta, mul(mul(exp(make_complex(0.0, dB * dU)), zModifiedCharFunc), zSimpsonW)); } }; HestonCUDAPrecision HestonCallFFTGPU( HestonCUDAPrecision dKappa, // rate of reversion HestonCUDAPrecision dTheta, // int run variance HestonCUDAPrecision dSigma, // vol of vol HestonCUDAPrecision dRho, // correlation HestonCUDAPrecision dV0, // initial variance HestonCUDAPrecision dR, // instantaneous short rate HestonCUDAPrecision dT, // time till maturity HestonCUDAPrecision dS0, // initial asset price HestonCUDAPrecision dStrike, long lN) { std::complex<HestonCUDAPrecision> zI(0.0, 1.0); HestonCUDAPrecision dX0 = log(dS0); HestonCUDAPrecision dAlpha = 1.5; // HestonCUDAPrecision dC = 600; HestonCUDAPrecision dEta = 0.25; HestonCUDAPrecision dB = M_PI / dEta; std::complex<HestonCUDAPrecision> zFFTFunc[lN]; 
std::complex<HestonCUDAPrecision> zPayoff[lN]; HestonCUDAPrecision dPayoff[lN]; HestonCUDAPrecision dLambda = 2 * dB / lN; HestonCUDAPrecision dPosition = (log(dStrike) + dB) / dLambda + 1; thrust::device_vector<int> dev_zFFTFuncI(lN); thrust::device_vector<HestonCUDAPrecisionComplex> dev_zFFTFunc(lN); thrust::sequence(dev_zFFTFuncI.begin(), dev_zFFTFuncI.end()); thrust::transform(dev_zFFTFuncI.begin(), dev_zFFTFuncI.end(), dev_zFFTFunc.begin(), HestonCallFFTGPU_functor(dKappa, dTheta, dSigma, dRho, dV0, dR, dT, dS0, dStrike, dX0, dAlpha, dEta, dB)); thrust::copy(dev_zFFTFunc.begin(), dev_zFFTFunc.end(), (HestonCUDAPrecisionComplex*)zFFTFunc); cufftHandle p; HestonCUDAPrecisionComplex* cufftFFTFunc = NULL; HestonCUDAPrecisionComplex* cufftPayoff = NULL; cudaMalloc((void**)&cufftFFTFunc, sizeof(HestonCUDAPrecisionComplex) * lN); cudaMalloc((void**)&cufftPayoff, sizeof(HestonCUDAPrecisionComplex) * lN); cudaMemcpy(cufftFFTFunc, zFFTFunc, sizeof(HestonCUDAPrecisionComplex) * lN, cudaMemcpyHostToDevice); #if defined HestonCUDAPrecisionFloat cufftPlan1d(&p, lN, CUFFT_C2C, 1); cufftExecC2C(p, cufftFFTFunc, cufftPayoff, CUFFT_FORWARD); #elif defined HestonCUDAPrecisionDouble cufftPlan1d(&p, lN, CUFFT_Z2Z, 1); cufftExecZ2Z(p, cufftFFTFunc, cufftPayoff, CUFFT_FORWARD); #endif cudaMemcpy(zPayoff, cufftPayoff, sizeof(HestonCUDAPrecisionComplex) * lN, cudaMemcpyDeviceToHost); cufftDestroy(p); cudaFree(cufftFFTFunc); cudaFree(cufftPayoff); for (int i = 0; i < lN; i++) dPayoff[i] = zPayoff[i].real(); double dCallValueM[lN]; /* wchan: replace this later w/ the appropriate BLAS vector-scalar function */ for (int i = 0; i < lN; i++) dCallValueM[i] = static_cast<double>(dPayoff[i]) / M_PI; double dLin[lN]; for (int i = 0; i < lN; i++) dLin[i] = 1.0 + i; gsl_interp_accel* acc = gsl_interp_accel_alloc(); gsl_spline* spline = gsl_spline_alloc(gsl_interp_cspline, lN); gsl_spline_init(spline, dLin, dCallValueM, lN); HestonCUDAPrecision dPrice = exp(-log(dStrike) * dAlpha) * 
gsl_spline_eval(spline, dPosition, acc); gsl_spline_free(spline); gsl_interp_accel_free(acc); return dPrice; }
836cfac787c20ca9856f4d6eeea1602c03692bd7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * The MIT License * * Copyright (c) 1997-2012 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <sci_defs/cuda_defs.h> #ifdef __cplusplus extern "C" { #endif //______________________________________________________________________ // // @brief A kernel that applies the stencil used in timeAdvance(...) 
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z) // @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z) // @param domainSize a three component vector that gives the size of the domain including ghost nodes // @param ghostLayers the number of layers of ghost cells // @param phi pointer to the source phi allocated on the device // @param newphi pointer to the sink phi allocated on the device __global__ void unifiedSchedulerTestKernel(uint3 domainLow, uint3 domainHigh, uint3 domainSize, int NGC, double *phi, double *newphi) { // calculate the thread indices int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; // Get the size of the data block in which the variables reside. // This is essentially the stride in the index calculations. int dx = domainSize.x; int dy = domainSize.y; // If the threads are within the bounds of the ghost layers // the algorithm is allowed to stream along the z direction // applying the stencil to a line of cells. The z direction // is streamed because it allows access of x and y elements // that are close to one another which should allow coalesced // memory accesses. if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) { for (int k = domainLow.z; k < domainHigh.z; k++) { // For an array of [ A ][ B ][ C ], we can index it thus: // (a * B * C) + (b * C) + (c * 1) int idx = INDEX3D(dx,dy,i,j,k); newphi[idx] = (1. 
/ 6) * (phi[INDEX3D(dx,dy, (i-1), j, k)] + phi[INDEX3D(dx,dy, (i+1), j, k)] + phi[INDEX3D(dx,dy, i, (j-1), k)] + phi[INDEX3D(dx,dy, i, (j+1), k)] + phi[INDEX3D(dx,dy, i, j, (k-1))] + phi[INDEX3D(dx,dy, i, j, (k+1))]); } } } void launchUnifiedSchedulerTestKernel(dim3 dimGrid, dim3 dimBlock, hipStream_t* stream, uint3 domainLow, uint3 domainHigh, uint3 domainSize, int numGhostCells, double* d_phi, double* d_newphi) { hipLaunchKernelGGL(( unifiedSchedulerTestKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream , domainLow, domainHigh, domainSize, numGhostCells, d_phi, d_newphi); } #ifdef __cplusplus } #endif
836cfac787c20ca9856f4d6eeea1602c03692bd7.cu
/* * The MIT License * * Copyright (c) 1997-2012 The University of Utah * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <sci_defs/cuda_defs.h> #ifdef __cplusplus extern "C" { #endif //______________________________________________________________________ // // @brief A kernel that applies the stencil used in timeAdvance(...) 
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z) // @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z) // @param domainSize a three component vector that gives the size of the domain including ghost nodes // @param ghostLayers the number of layers of ghost cells // @param phi pointer to the source phi allocated on the device // @param newphi pointer to the sink phi allocated on the device __global__ void unifiedSchedulerTestKernel(uint3 domainLow, uint3 domainHigh, uint3 domainSize, int NGC, double *phi, double *newphi) { // calculate the thread indices int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; // Get the size of the data block in which the variables reside. // This is essentially the stride in the index calculations. int dx = domainSize.x; int dy = domainSize.y; // If the threads are within the bounds of the ghost layers // the algorithm is allowed to stream along the z direction // applying the stencil to a line of cells. The z direction // is streamed because it allows access of x and y elements // that are close to one another which should allow coalesced // memory accesses. if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) { for (int k = domainLow.z; k < domainHigh.z; k++) { // For an array of [ A ][ B ][ C ], we can index it thus: // (a * B * C) + (b * C) + (c * 1) int idx = INDEX3D(dx,dy,i,j,k); newphi[idx] = (1. 
/ 6) * (phi[INDEX3D(dx,dy, (i-1), j, k)] + phi[INDEX3D(dx,dy, (i+1), j, k)] + phi[INDEX3D(dx,dy, i, (j-1), k)] + phi[INDEX3D(dx,dy, i, (j+1), k)] + phi[INDEX3D(dx,dy, i, j, (k-1))] + phi[INDEX3D(dx,dy, i, j, (k+1))]); } } } void launchUnifiedSchedulerTestKernel(dim3 dimGrid, dim3 dimBlock, cudaStream_t* stream, uint3 domainLow, uint3 domainHigh, uint3 domainSize, int numGhostCells, double* d_phi, double* d_newphi) { unifiedSchedulerTestKernel<<< dimGrid, dimBlock, 0, *stream >>>(domainLow, domainHigh, domainSize, numGhostCells, d_phi, d_newphi); } #ifdef __cplusplus } #endif
39d78699aae64a8c07822ac3833e4ceb581fce11.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************* 11 12 Copyright (C) 2015 by Wisllay Vitrio 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ #include <vector> #include <fstream> #include <cstdio> #include <cstdlib> #include <iostream> #include <algorithm> #include <iostream> #include <iomanip> #include <tclap/CmdLine.h> #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" #include "knn_hip.cuh" #include "cuda_distances.cuh" #include <cuda.h> #include "Dataset.h" #include "cuLazyNN_Broof.cuh" #include "cuNearestNeighbors.cuh" #include <map> using namespace std; class CustomHelpVisitor : public TCLAP::HelpVisitor { protected: TCLAP::ValueArg<std::string>* _modelArg; public: CustomHelpVisitor(TCLAP::CmdLineInterface *cmd, TCLAP::CmdLineOutput **out, TCLAP::ValueArg<std::string> *modelArg) : TCLAP::HelpVisitor(cmd, out), _modelArg(modelArg) {} ; void visit() { if(!_modelArg->isSet()) TCLAP::HelpVisitor::visit(); }; }; struct FileStats { int num_docs; int num_terms; std::map<int, int> doc_to_class; FileStats() : num_docs(0), num_terms(0) {} }; FileStats readTrainingFile(std::string &file, std::vector<Entry> &entries); void readTestFile(InvertedIndex &index, 
FileStats &stats, std::string &file, int K, std::string distance, ofstream &fileout, ofstream &filedists); void updateStatsMaxFeatureTest(std::string &filename, FileStats &stats); bool makeQuery(InvertedIndex &inverted_index, FileStats &stats, std::string &line, int K, void (*distance)(InvertedIndex, Entry*, int*, cuSimilarity*, int D), ofstream &fileout, ofstream &filedists); void write_output(ofstream &fileout, int trueclass, int guessedclass, int docid); int get_class(std::string token); void teste_lazy_boost(std::string trainingFileName, std::string testFileName, std::string resultsFileName, int k, int trial, bool append = true, float max_features = 0.03, int n_boost_iter = 10, int n_gpus = 1){ Dataset training_set, test_set; //int correct_cosine = 0, wrong_cosine = 0; training_set.loadGtKnnFormat(trainingFileName.c_str()); cuLazyNN_Boost cLazy(training_set, max_features, n_boost_iter, n_gpus); test_set.loadGtKnnFormat(testFileName.c_str()); //double start, end, total = 0; ofstream file; if(append) file.open(resultsFileName.data(), std::ios_base::app); else file.open(resultsFileName.data()); std::vector<int> pred = cLazy.classify(test_set, k); file << "#" << trial << endl; for (int i = 0; i < pred.size(); ++i) { file << i << " CLASS=" << test_set.getSamples()[i].y << " CLASS=" << pred[i] << ":1" << endl; } //printf("Total time taken to classify all queries: %lf seconds\n", total); printf("Cosine similarity\n"); //printf("Correct: %d Wrong: %d\n", correct_cosine, wrong_cosine); //printf("Accuracy: %lf%%\n\n", double(correct_cosine) / double(test_set.size())); file.close(); } template <class InputIterator1> int size (InputIterator1 first1, InputIterator1 last1) { int counter = 0; for (; first1 != last1; ++first1) { counter++; } return counter; } template <class InputIterator1, class InputIterator2> int count_distinct (InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { int counter = 0; while (true) { if (first1==last1) 
return counter + size(first2,last2); if (first2==last2) return counter + size(first1,last1); if (first1->first<first2->first) { counter++; ++first1; } else if (first2->first<first1->first) { counter++; ++first2; } else { counter++; ++first1; ++first2; } } } void teste_cuNN(std::string trainingFileName, std::string testFileName, std::string resultsFileName, int k, int trial, bool append = true){ srand(time(NULL)); Dataset training_set, test_set; int tp = 0, wrong_cosine = 0; training_set.loadGtKnnFormat(trainingFileName.c_str()); cuNearestNeighbors cuNN(training_set); test_set.loadGtKnnFormat(testFileName.c_str()); double start, end, total = 0; printf("train (dim : %d, class: %d ) - test (dim: %d, class: %d) - total classes : %d \n", training_set.dimension(), training_set.num_class(), test_set.dimension(), test_set.num_class(), count_distinct(training_set.doc_per_class.begin(),training_set.doc_per_class.end(),test_set.doc_per_class.begin(),test_set.doc_per_class.end())); int test_set_size = test_set.getSamples().size(); int documents_processed = 0; std::vector<sample>::iterator end_it = test_set.sample_end(); int num_class = count_distinct(training_set.doc_per_class.begin(),training_set.doc_per_class.end(),test_set.doc_per_class.begin(),test_set.doc_per_class.end()); int **confusion_matrix = new int*[num_class]; for (int i = 0; i < num_class; ++i) { confusion_matrix[i] = new int[num_class]; for (int j = 0; j < num_class; ++j) { confusion_matrix[i][j] = 0; } } ofstream file; if(append) file.open(resultsFileName.data(), std::ios_base::app); else file.open(resultsFileName.data()); file << "#" << trial << endl; for (std::vector<sample>::iterator it = test_set.sample_begin(); it != end_it; ++it) { start = gettime(); int guessed_class = cuNN.classify(it->features, k); end = gettime(); total += end - start; confusion_matrix[it->y][guessed_class]++; if(guessed_class == it->y) { tp++; } else { wrong_cosine++; } ++documents_processed; std::cerr.precision(4); 
std::cerr.setf(std::ios::fixed); std::cerr << "\r" << double(documents_processed)/test_set_size * 100 << "%" << " - " << double(tp) / (documents_processed); file << documents_processed << " CLASS=" << it->y << " CLASS=" << guessed_class << ":1" << endl; } printf("\nTotal time taken to classify all queries: %lf seconds\n", total); printf("Cosine similarity\n"); printf("Correct: %d Wrong: %d\n", tp, wrong_cosine); printf("Accuracy: %lf%%\n\n", double(tp) / double(test_set_size)); int tps = 0, fps = 0, fns; double macro_avg_prec = 0, macro_avg_recall = 0; for (int i = 0; i < num_class; ++i) { int tp = confusion_matrix[i][i], fp = 0, fn = 0; for (int j = 0; j < num_class; ++j) { fp += (i != j)? confusion_matrix[i][j] : 0; fn += (i != j)? confusion_matrix[j][i] : 0; //cout << setw(5) << confusion_matrix[i][j] << " "; } //cout << endl; macro_avg_prec += (tp + fp) > 0 ? (double)tp / (tp + fp) : 0; macro_avg_recall += (tp + fn) > 0 ?(double)tp / (tp + fn) : 0; tps += tp; fps += fp; fns += fn; } double micro_avg_prec = (double)tps / (test_set_size); double micro_avg_recall = (double)tps / (tps + fns); double microF1 = 2*micro_avg_recall*micro_avg_prec / (micro_avg_recall+micro_avg_prec); macro_avg_prec /= test_set.num_class(); macro_avg_recall /= test_set.num_class(); double macroF1 = 2*macro_avg_recall*macro_avg_prec / (macro_avg_recall+macro_avg_prec); printf("microF1 : %f, macroF1 : %f\n", microF1, macroF1); for (int i = 0; i < num_class; ++i) { delete[] confusion_matrix[i]; } delete[] confusion_matrix; file.close(); } /** * Receives as parameters the training file name and the test file name */ int main(int argc, char **argv) { //initCudpp(); //initializes the CUDPP library //hipInit(0); hipDeviceSynchronize(); // Wrap everything in a try block. Do this every time, // because exceptions will be thrown for problems. try { // Define the command line object, and insert a message // that describes the program. 
The "Command description message" // is printed last in the help text. The second argument is the // delimiter (usually space) and the last one is the version number. // The CmdLine object parses the argv array based on the Arg objects // that it contains. TCLAP::CmdLine cmd("Command description message", ' ', "0.9"); vector<string> allowed; allowed.push_back("knn"); allowed.push_back("knn_rf"); TCLAP::ValuesConstraint<string> allowedVals( allowed ); TCLAP::ValueArg<std::string> modelArg("c", "classifier", "Classifier model (default : knn).", false, allowed[0], &allowedVals); cmd.add( modelArg ); // Define a value argument and add it to the command line. // A value arg defines a flag and a type of value that it expects, // such as "-n Bishop". TCLAP::UnlabeledValueArg<std::string> trainArg("train","Traning dataset location.", true, "", "training set"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. cmd.add( trainArg ); TCLAP::UnlabeledValueArg<std::string> testArg("test", "Test dataset location.", true, "", "test set"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. cmd.add( testArg ); TCLAP::ValueArg<std::string> resultsArg("r", "results", "Results output file (default : results.out).", false, "results.out", "string"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. 
cmd.add( resultsArg ); TCLAP::ValueArg<int> trialArg("","trial","Trial number.", false, 0, "int"); cmd.add( trialArg ); TCLAP::SwitchArg appendSwitch("a","append","Append results to result file.", cmd); TCLAP::ValueArg<int> kArg("k","K","K nearest neirghbor to be searched.(default : 30)", false, 30, "int"); cmd.add( kArg ); TCLAP::ValueArg<int> gpusArg("g","gpus","Number of GPUs.(default : 1)", false, 1, "int"); cmd.add( gpusArg ); TCLAP::ValueArg<int> iboost("i","iboost","Number of boosting iteration.(default : 10)", false, 10, "int"); cmd.add( iboost ); TCLAP::ValueArg<float> max_features("m","max_features","Number of boosting iteration.(default : 0.15)", false, 0.15, "float"); cmd.add( max_features ); TCLAP::ValueArg<int> numTreesArg("n","number-trees","Maximum number of trees in the ensemble.(default : 100)", false, 100, "int"); cmd.add( numTreesArg ); TCLAP::ValueArg<int> heightTreesArg("H","height","Maximum height of trees in the ensemble(default : 0). H=0 means unpruned otherwise prune with H top.", false, 100, "int"); cmd.add( heightTreesArg ); // Parse the argv array. 
cmd.parse( argc, argv ); std::string model = modelArg.getValue(); if(model == "knn_rf"){ teste_lazy_boost(trainArg.getValue(), testArg.getValue(), resultsArg.getValue(), kArg.getValue(), trialArg.getValue(), appendSwitch.getValue(), max_features.getValue(), iboost.getValue(), gpusArg.getValue()); }else{ teste_cuNN(trainArg.getValue(), testArg.getValue(), resultsArg.getValue(), kArg.getValue(), trialArg.getValue(), appendSwitch.getValue()); } } catch (TCLAP::ArgException &e) // catch any exceptions { std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl; } return EXIT_SUCCESS; } FileStats readTrainingFile(std::string &filename, std::vector<Entry> &entries) { std::ifstream input(filename.c_str()); std::string line; FileStats stats; while(!input.eof()) { std::getline(input, line); if(line == "") continue; int doc_id = stats.num_docs++; std::vector<std::string> tokens = split(line, ' '); stats.doc_to_class[doc_id] = get_class(tokens[1]); for(int i = 2, size = tokens.size(); i + 1 < size; i+=2) { int term_id = atoi(tokens[i].c_str()); int term_count = atoi(tokens[i+1].c_str()); stats.num_terms = ::max(stats.num_terms, term_id + 1); entries.push_back(Entry(doc_id, term_id, term_count)); } } input.close(); return stats; } void updateStatsMaxFeatureTest(std::string &filename, FileStats &stats) { std::ifstream input(filename.c_str()); std::string line; while(!input.eof()) { std::getline(input, line); if(line == "") continue; std::vector<std::string> tokens = split(line, ' '); for(int i = 2, size = tokens.size(); i + 1 < size; i+=2) { int term_id = atoi(tokens[i].c_str()); stats.num_terms = ::max(stats.num_terms, term_id + 1); } } } void write_output(ofstream &outputfile, int trueclass, int guessedclass, int docid) { outputfile << docid<<" CLASS="<<trueclass<<" CLASS="<<guessedclass<<":1"<<std::endl; } int get_class(std::string token) { std::vector<std::string> class_tokens = split(token, '='); if(class_tokens.size() == 1) { return 
atoi(class_tokens[0].c_str()); } else { return atoi(class_tokens[1].c_str()); } }
39d78699aae64a8c07822ac3833e4ceb581fce11.cu
/********************************************************************* 11 12 Copyright (C) 2015 by Wisllay Vitrio 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ #include <vector> #include <fstream> #include <cstdio> #include <cstdlib> #include <iostream> #include <algorithm> #include <iostream> #include <iomanip> #include <tclap/CmdLine.h> #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" #include "knn.cuh" #include "cuda_distances.cuh" #include <cuda.h> #include "Dataset.h" #include "cuLazyNN_Broof.cuh" #include "cuNearestNeighbors.cuh" #include <map> using namespace std; class CustomHelpVisitor : public TCLAP::HelpVisitor { protected: TCLAP::ValueArg<std::string>* _modelArg; public: CustomHelpVisitor(TCLAP::CmdLineInterface *cmd, TCLAP::CmdLineOutput **out, TCLAP::ValueArg<std::string> *modelArg) : TCLAP::HelpVisitor(cmd, out), _modelArg(modelArg) {} ; void visit() { if(!_modelArg->isSet()) TCLAP::HelpVisitor::visit(); }; }; struct FileStats { int num_docs; int num_terms; std::map<int, int> doc_to_class; FileStats() : num_docs(0), num_terms(0) {} }; FileStats readTrainingFile(std::string &file, std::vector<Entry> &entries); void readTestFile(InvertedIndex &index, FileStats &stats, std::string &file, int K, std::string 
distance, ofstream &fileout, ofstream &filedists); void updateStatsMaxFeatureTest(std::string &filename, FileStats &stats); bool makeQuery(InvertedIndex &inverted_index, FileStats &stats, std::string &line, int K, void (*distance)(InvertedIndex, Entry*, int*, cuSimilarity*, int D), ofstream &fileout, ofstream &filedists); void write_output(ofstream &fileout, int trueclass, int guessedclass, int docid); int get_class(std::string token); void teste_lazy_boost(std::string trainingFileName, std::string testFileName, std::string resultsFileName, int k, int trial, bool append = true, float max_features = 0.03, int n_boost_iter = 10, int n_gpus = 1){ Dataset training_set, test_set; //int correct_cosine = 0, wrong_cosine = 0; training_set.loadGtKnnFormat(trainingFileName.c_str()); cuLazyNN_Boost cLazy(training_set, max_features, n_boost_iter, n_gpus); test_set.loadGtKnnFormat(testFileName.c_str()); //double start, end, total = 0; ofstream file; if(append) file.open(resultsFileName.data(), std::ios_base::app); else file.open(resultsFileName.data()); std::vector<int> pred = cLazy.classify(test_set, k); file << "#" << trial << endl; for (int i = 0; i < pred.size(); ++i) { file << i << " CLASS=" << test_set.getSamples()[i].y << " CLASS=" << pred[i] << ":1" << endl; } //printf("Total time taken to classify all queries: %lf seconds\n", total); printf("Cosine similarity\n"); //printf("Correct: %d Wrong: %d\n", correct_cosine, wrong_cosine); //printf("Accuracy: %lf%%\n\n", double(correct_cosine) / double(test_set.size())); file.close(); } template <class InputIterator1> int size (InputIterator1 first1, InputIterator1 last1) { int counter = 0; for (; first1 != last1; ++first1) { counter++; } return counter; } template <class InputIterator1, class InputIterator2> int count_distinct (InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { int counter = 0; while (true) { if (first1==last1) return counter + size(first2,last2); if (first2==last2) 
return counter + size(first1,last1); if (first1->first<first2->first) { counter++; ++first1; } else if (first2->first<first1->first) { counter++; ++first2; } else { counter++; ++first1; ++first2; } } } void teste_cuNN(std::string trainingFileName, std::string testFileName, std::string resultsFileName, int k, int trial, bool append = true){ srand(time(NULL)); Dataset training_set, test_set; int tp = 0, wrong_cosine = 0; training_set.loadGtKnnFormat(trainingFileName.c_str()); cuNearestNeighbors cuNN(training_set); test_set.loadGtKnnFormat(testFileName.c_str()); double start, end, total = 0; printf("train (dim : %d, class: %d ) - test (dim: %d, class: %d) - total classes : %d \n", training_set.dimension(), training_set.num_class(), test_set.dimension(), test_set.num_class(), count_distinct(training_set.doc_per_class.begin(),training_set.doc_per_class.end(),test_set.doc_per_class.begin(),test_set.doc_per_class.end())); int test_set_size = test_set.getSamples().size(); int documents_processed = 0; std::vector<sample>::iterator end_it = test_set.sample_end(); int num_class = count_distinct(training_set.doc_per_class.begin(),training_set.doc_per_class.end(),test_set.doc_per_class.begin(),test_set.doc_per_class.end()); int **confusion_matrix = new int*[num_class]; for (int i = 0; i < num_class; ++i) { confusion_matrix[i] = new int[num_class]; for (int j = 0; j < num_class; ++j) { confusion_matrix[i][j] = 0; } } ofstream file; if(append) file.open(resultsFileName.data(), std::ios_base::app); else file.open(resultsFileName.data()); file << "#" << trial << endl; for (std::vector<sample>::iterator it = test_set.sample_begin(); it != end_it; ++it) { start = gettime(); int guessed_class = cuNN.classify(it->features, k); end = gettime(); total += end - start; confusion_matrix[it->y][guessed_class]++; if(guessed_class == it->y) { tp++; } else { wrong_cosine++; } ++documents_processed; std::cerr.precision(4); std::cerr.setf(std::ios::fixed); std::cerr << "\r" << 
double(documents_processed)/test_set_size * 100 << "%" << " - " << double(tp) / (documents_processed); file << documents_processed << " CLASS=" << it->y << " CLASS=" << guessed_class << ":1" << endl; } printf("\nTotal time taken to classify all queries: %lf seconds\n", total); printf("Cosine similarity\n"); printf("Correct: %d Wrong: %d\n", tp, wrong_cosine); printf("Accuracy: %lf%%\n\n", double(tp) / double(test_set_size)); int tps = 0, fps = 0, fns; double macro_avg_prec = 0, macro_avg_recall = 0; for (int i = 0; i < num_class; ++i) { int tp = confusion_matrix[i][i], fp = 0, fn = 0; for (int j = 0; j < num_class; ++j) { fp += (i != j)? confusion_matrix[i][j] : 0; fn += (i != j)? confusion_matrix[j][i] : 0; //cout << setw(5) << confusion_matrix[i][j] << " "; } //cout << endl; macro_avg_prec += (tp + fp) > 0 ? (double)tp / (tp + fp) : 0; macro_avg_recall += (tp + fn) > 0 ?(double)tp / (tp + fn) : 0; tps += tp; fps += fp; fns += fn; } double micro_avg_prec = (double)tps / (test_set_size); double micro_avg_recall = (double)tps / (tps + fns); double microF1 = 2*micro_avg_recall*micro_avg_prec / (micro_avg_recall+micro_avg_prec); macro_avg_prec /= test_set.num_class(); macro_avg_recall /= test_set.num_class(); double macroF1 = 2*macro_avg_recall*macro_avg_prec / (macro_avg_recall+macro_avg_prec); printf("microF1 : %f, macroF1 : %f\n", microF1, macroF1); for (int i = 0; i < num_class; ++i) { delete[] confusion_matrix[i]; } delete[] confusion_matrix; file.close(); } /** * Receives as parameters the training file name and the test file name */ int main(int argc, char **argv) { //initCudpp(); //initializes the CUDPP library //cuInit(0); cudaDeviceSynchronize(); // Wrap everything in a try block. Do this every time, // because exceptions will be thrown for problems. try { // Define the command line object, and insert a message // that describes the program. The "Command description message" // is printed last in the help text. 
The second argument is the // delimiter (usually space) and the last one is the version number. // The CmdLine object parses the argv array based on the Arg objects // that it contains. TCLAP::CmdLine cmd("Command description message", ' ', "0.9"); vector<string> allowed; allowed.push_back("knn"); allowed.push_back("knn_rf"); TCLAP::ValuesConstraint<string> allowedVals( allowed ); TCLAP::ValueArg<std::string> modelArg("c", "classifier", "Classifier model (default : knn).", false, allowed[0], &allowedVals); cmd.add( modelArg ); // Define a value argument and add it to the command line. // A value arg defines a flag and a type of value that it expects, // such as "-n Bishop". TCLAP::UnlabeledValueArg<std::string> trainArg("train","Traning dataset location.", true, "", "training set"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. cmd.add( trainArg ); TCLAP::UnlabeledValueArg<std::string> testArg("test", "Test dataset location.", true, "", "test set"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. cmd.add( testArg ); TCLAP::ValueArg<std::string> resultsArg("r", "results", "Results output file (default : results.out).", false, "results.out", "string"); // Add the argument nameArg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. 
cmd.add( resultsArg ); TCLAP::ValueArg<int> trialArg("","trial","Trial number.", false, 0, "int"); cmd.add( trialArg ); TCLAP::SwitchArg appendSwitch("a","append","Append results to result file.", cmd); TCLAP::ValueArg<int> kArg("k","K","K nearest neirghbor to be searched.(default : 30)", false, 30, "int"); cmd.add( kArg ); TCLAP::ValueArg<int> gpusArg("g","gpus","Number of GPUs.(default : 1)", false, 1, "int"); cmd.add( gpusArg ); TCLAP::ValueArg<int> iboost("i","iboost","Number of boosting iteration.(default : 10)", false, 10, "int"); cmd.add( iboost ); TCLAP::ValueArg<float> max_features("m","max_features","Number of boosting iteration.(default : 0.15)", false, 0.15, "float"); cmd.add( max_features ); TCLAP::ValueArg<int> numTreesArg("n","number-trees","Maximum number of trees in the ensemble.(default : 100)", false, 100, "int"); cmd.add( numTreesArg ); TCLAP::ValueArg<int> heightTreesArg("H","height","Maximum height of trees in the ensemble(default : 0). H=0 means unpruned otherwise prune with H top.", false, 100, "int"); cmd.add( heightTreesArg ); // Parse the argv array. 
cmd.parse( argc, argv ); std::string model = modelArg.getValue(); if(model == "knn_rf"){ teste_lazy_boost(trainArg.getValue(), testArg.getValue(), resultsArg.getValue(), kArg.getValue(), trialArg.getValue(), appendSwitch.getValue(), max_features.getValue(), iboost.getValue(), gpusArg.getValue()); }else{ teste_cuNN(trainArg.getValue(), testArg.getValue(), resultsArg.getValue(), kArg.getValue(), trialArg.getValue(), appendSwitch.getValue()); } } catch (TCLAP::ArgException &e) // catch any exceptions { std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl; } return EXIT_SUCCESS; } FileStats readTrainingFile(std::string &filename, std::vector<Entry> &entries) { std::ifstream input(filename.c_str()); std::string line; FileStats stats; while(!input.eof()) { std::getline(input, line); if(line == "") continue; int doc_id = stats.num_docs++; std::vector<std::string> tokens = split(line, ' '); stats.doc_to_class[doc_id] = get_class(tokens[1]); for(int i = 2, size = tokens.size(); i + 1 < size; i+=2) { int term_id = atoi(tokens[i].c_str()); int term_count = atoi(tokens[i+1].c_str()); stats.num_terms = std::max(stats.num_terms, term_id + 1); entries.push_back(Entry(doc_id, term_id, term_count)); } } input.close(); return stats; } void updateStatsMaxFeatureTest(std::string &filename, FileStats &stats) { std::ifstream input(filename.c_str()); std::string line; while(!input.eof()) { std::getline(input, line); if(line == "") continue; std::vector<std::string> tokens = split(line, ' '); for(int i = 2, size = tokens.size(); i + 1 < size; i+=2) { int term_id = atoi(tokens[i].c_str()); stats.num_terms = std::max(stats.num_terms, term_id + 1); } } } void write_output(ofstream &outputfile, int trueclass, int guessedclass, int docid) { outputfile << docid<<" CLASS="<<trueclass<<" CLASS="<<guessedclass<<":1"<<std::endl; } int get_class(std::string token) { std::vector<std::string> class_tokens = split(token, '='); if(class_tokens.size() == 1) { return 
atoi(class_tokens[0].c_str()); } else { return atoi(class_tokens[1].c_str()); } }
7f6d1595694198757545205816ec13d6bbe35d93.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rightUnpackingKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *temperature = NULL; hipMalloc(&temperature, XSIZE*YSIZE); double *ghost = NULL; hipMalloc(&ghost, XSIZE*YSIZE); int block_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rightUnpackingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,ghost,block_size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rightUnpackingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,ghost,block_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rightUnpackingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temperature,ghost,block_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7f6d1595694198757545205816ec13d6bbe35d93.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rightUnpackingKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *temperature = NULL; cudaMalloc(&temperature, XSIZE*YSIZE); double *ghost = NULL; cudaMalloc(&ghost, XSIZE*YSIZE); int block_size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rightUnpackingKernel<<<gridBlock,threadBlock>>>(temperature,ghost,block_size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rightUnpackingKernel<<<gridBlock,threadBlock>>>(temperature,ghost,block_size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rightUnpackingKernel<<<gridBlock,threadBlock>>>(temperature,ghost,block_size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bf948bb7033c9715389d229e41b983dff8d19086.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduce(double *a,double *z, int sizeOut){ int tid = blockDim.x*blockIdx.x + threadIdx.x; if(tid > N/2)return; extern __shared__ double subTotals[]; subTotals[threadIdx.x]=(a[tid*2]+a[tid*2+1])/2;//sum every two values using all threads __syncthreads(); int level=2; while ((blockDim.x/level) >= sizeOut){//keep halving values until sizeout remains if(threadIdx.x % level==0){//use half threads every iteration subTotals[threadIdx.x]=(subTotals[threadIdx.x]+subTotals[threadIdx.x+(level/2)])/2; } __syncthreads();//we have to sync threads every time here :( level = level * 2; } level = level /2; if(threadIdx.x % level==0){ z[tid/level] = subTotals[threadIdx.x]; } }
bf948bb7033c9715389d229e41b983dff8d19086.cu
#include "includes.h" __global__ void reduce(double *a,double *z, int sizeOut){ int tid = blockDim.x*blockIdx.x + threadIdx.x; if(tid > N/2)return; extern __shared__ double subTotals[]; subTotals[threadIdx.x]=(a[tid*2]+a[tid*2+1])/2;//sum every two values using all threads __syncthreads(); int level=2; while ((blockDim.x/level) >= sizeOut){//keep halving values until sizeout remains if(threadIdx.x % level==0){//use half threads every iteration subTotals[threadIdx.x]=(subTotals[threadIdx.x]+subTotals[threadIdx.x+(level/2)])/2; } __syncthreads();//we have to sync threads every time here :( level = level * 2; } level = level /2; if(threadIdx.x % level==0){ z[tid/level] = subTotals[threadIdx.x]; } }
b1efd9e31931d93ee83c0f5234c1711ca80dcab8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 6 //Poisson Blending #include "utils.h" #include "stdio.h" #include <thrust/host_vector.h> #include "reference_calc.cpp" #define N_ITERATIONS 800 /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ __global__ void jacobi(const uchar4 *src, const uchar4 *dest, float *r1, float *r2, float*g1, float *g2, float* b1, float* b2, unsigned char *border, unsigned char *interior, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; // Column int y_i = threadIdx.y + blockIdx.y * blockDim.y; // Row // observe array bounds if(x_i > 0 && x_i < numCols-1){ if(y_i > 0 && y_i < numRows-1){ int i = y_i * numCols + x_i; // Row-ordered index in array if(border[i]){ r2[i] = dest[i].x; g2[i] = dest[i].y; b2[i] = dest[i].z; } // pixel must be in interior if(interior[i]){ // cue horrible workload imbalance... 
int neighbor_i; float rSum = 0; float gSum = 0; float bSum = 0; float num_neighbors = 0; // 1 neighbor_i = i - numCols; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 2 neighbor_i = i + numCols; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 3 neighbor_i = i - 1; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 4 neighbor_i = i + 1; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += 
SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // divide by num neighbors rSum /= num_neighbors; gSum /= num_neighbors; bSum /= num_neighbors; //clamp to [0, 255] r2[i] = min(255.f, max(0.f, rSum)); g2[i] = min(255.f, max(0.f, gSum)); b2[i] = min(255.f, max(0.f, bSum)); } } } } // __device__ void guess_pixel(float *r1, float* r2, unsigned char *border, // unsigned char *interior, const size_t numCols){ // // } /* 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ __global__ void mask_source(const uchar4* const h_src, unsigned char* d_mask, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ // set to 1 if not white d_mask[i] = (h_src[i].x + h_src[i].y + h_src[i].z < 3 * 255) ? 1 : 0; } } /* Debug function. Create image of a mask. values 1 in mask get white in image */ __global__ void visualize_mask(unsigned char* d_mask, uchar4* d_out, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ if(d_mask[i]){ d_out[i].x = 255; d_out[i].y = 255; d_out[i].z = 255; } } } /* 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 
*/ __global__ void mask_interior(unsigned char* mask, unsigned char* border, unsigned char* strictInterior, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; // Column int y_i = threadIdx.y + blockIdx.y * blockDim.y; // Row int i = y_i * numCols + x_i; // Row-ordered index in array // observe array bounds if(x_i > 0 && x_i < numCols-1){ if(y_i > 0 && y_i < numRows-1){ // pixel must be inside mask if(mask[i]){ // all neighbors must be in mask if(mask[(y_i - 1)* numCols + x_i] && mask[(y_i + 1) * numCols + x_i] && mask[y_i * numCols + x_i -1] && mask[y_i * numCols + x_i + 1]){ strictInterior[i] = 1; border[i] = 0; } else { strictInterior[i] = 0; border[i] = 1; } } else { strictInterior[i] = 0; border[i] = 0; } } } } /* 3) Separate out the incoming image into three separate channels TODO: remove? */ __global__ void separate_channels(const uchar4* d_src, unsigned char* d_red, unsigned char* d_green, unsigned char* d_blue, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; if (x_i < numCols && y_i < numRows){ int i = x_i + y_i * numCols; d_red[i] = d_src[i].x; d_green[i] = d_src[i].y; d_blue[i] = d_src[i].z; } } /* 3) Separate out the incoming image into three separate channels 4) Create two float buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 
*/ __global__ void init_buffers(const uchar4* d_src, float* r1, float* r2, float* g1, float* g2, float* b1, float* b2, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; if (x_i < numCols && y_i < numRows){ int i = x_i + y_i * numCols; r1[i] = d_src[i].x; r2[i] = d_src[i].x; g1[i] = d_src[i].y; g2[i] = d_src[i].y; b1[i] = d_src[i].z; b2[i] = d_src[i].z; } } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. */ __global__ void combine_image(uchar4* d_out, unsigned char *interior, float *d_red, float *d_green, float *d_blue, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ if(interior[i]){ d_out[i].x = (unsigned char) d_red[i]; d_out[i].y = (unsigned char) d_green[i]; d_out[i].z = (unsigned char) d_blue[i]; } } } void swap(float **a, float **b){ float* tmp = *b; *b = *a; *a = tmp; } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { // device variables unsigned char* d_src_mask, *d_border, *d_strictInterior; unsigned char *d_redSrc, *d_greenSrc, *d_blueSrc; float *d_red1, *d_red2, *d_green1, *d_green2, *d_blue1, *d_blue2; uchar4 *d_src, *d_dest, *d_mask_test; // computation vars const unsigned int size = numColsSource * numRowsSource; const unsigned int size_char = size * sizeof(unsigned char); const unsigned int size_float = size * sizeof(float); const dim3 threads(32, 32); const dim3 blocks(ceil((float)numColsSource/threads.x), ceil((float)numRowsSource/threads.y)); // declare memory 
checkCudaErrors(hipMalloc(&d_src_mask, size_char)); checkCudaErrors(hipMalloc(&d_border, size_char)); checkCudaErrors(hipMalloc(&d_strictInterior, size_char)); checkCudaErrors(hipMalloc(&d_redSrc, size_char)); checkCudaErrors(hipMalloc(&d_greenSrc, size_char)); checkCudaErrors(hipMalloc(&d_blueSrc, size_char)); // channel buffers checkCudaErrors(hipMalloc(&d_red1, size_float)); checkCudaErrors(hipMalloc(&d_red2, size_float)); checkCudaErrors(hipMalloc(&d_green1, size_float)); checkCudaErrors(hipMalloc(&d_green2, size_float)); checkCudaErrors(hipMalloc(&d_blue1, size_float)); checkCudaErrors(hipMalloc(&d_blue2, size_float)); // init masks checkCudaErrors(hipMemset(d_src_mask, 0, size_char)); checkCudaErrors(hipMemset(d_border, 0, size_char)); checkCudaErrors(hipMemset(d_strictInterior, 0, size_char)); // images on device checkCudaErrors(hipMalloc(&d_src, size * sizeof(uchar4))); checkCudaErrors(hipMemcpy(d_src, h_sourceImg, size * sizeof(uchar4), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc(&d_dest, size * sizeof(uchar4))); checkCudaErrors(hipMemcpy(d_dest, h_destImg, size * sizeof(uchar4), hipMemcpyHostToDevice)); /* 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ hipLaunchKernelGGL(( mask_source), dim3(blocks), dim3(threads), 0, 0, d_src, d_src_mask, numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 
*/ hipLaunchKernelGGL(( mask_interior), dim3(blocks), dim3(threads), 0, 0, d_src_mask, d_border, d_strictInterior, numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* 3) Separate out the incoming image into three separate channels TODO: remove and combine with 4? */ // separate_channels<<<blocks, threads>>>(d_src, d_redSrc, d_greenSrc, // d_blueSrc, numRowsSource, numColsSource); /* 4) Create two float buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. */ hipLaunchKernelGGL(( init_buffers), dim3(blocks), dim3(threads), 0, 0, d_src, d_red1, d_red2, d_green1, d_green2, d_blue1, d_blue2, numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); /* 5) For each color channel perform the Jacobi iteration described above 800 times. */ for(int n=0; n<N_ITERATIONS; n++){ hipLaunchKernelGGL(( jacobi), dim3(blocks), dim3(threads), 0, 0, d_src, d_dest, d_red1, d_red2, d_green1, d_green2, d_blue1, d_blue2, d_border, d_strictInterior, numRowsSource, numColsSource); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // swap pointers, new vals (2) become previous vals (1) swap(&d_red1, &d_red2); swap(&d_green1, &d_green2); swap(&d_blue1, &d_blue2); } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. 
*/ hipLaunchKernelGGL(( combine_image), dim3(blocks), dim3(threads), 0, 0, d_dest, d_strictInterior, d_red2, d_green2, d_blue2, numRowsSource, numColsSource); checkCudaErrors(hipMemcpy(h_blendedImg, d_dest, size * sizeof(uchar4), hipMemcpyDeviceToHost)); // // DEBUG MASK // // checkCudaErrors(hipMalloc(&d_mask_test, size * sizeof(uchar4))); // checkCudaErrors(hipMemset(d_mask_test, 0, size * sizeof(uchar4))); // first arg can be: d_src_mask, d_border, d_strictInterior // visualize_mask<<<blocks, threads>>>(d_border, d_mask_test, numRowsSource, numColsSource); // checkCudaErrors(hipMemcpy(h_blendedImg, d_mask_test, size * sizeof(uchar4), hipMemcpyDeviceToHost)); // checkCudaErrors(hipFree(d_mask_test)); // END DEBUG // free allocated memory checkCudaErrors(hipFree(d_src_mask)); checkCudaErrors(hipFree(d_src)); checkCudaErrors(hipFree(d_dest)); checkCudaErrors(hipFree(d_border)); checkCudaErrors(hipFree(d_strictInterior)); checkCudaErrors(hipFree(d_redSrc)); checkCudaErrors(hipFree(d_greenSrc)); checkCudaErrors(hipFree(d_blueSrc)); checkCudaErrors(hipFree(d_red1)); checkCudaErrors(hipFree(d_red2)); checkCudaErrors(hipFree(d_green1)); checkCudaErrors(hipFree(d_green2)); checkCudaErrors(hipFree(d_blue1)); checkCudaErrors(hipFree(d_blue2)); }
b1efd9e31931d93ee83c0f5234c1711ca80dcab8.cu
//Udacity HW 6 //Poisson Blending #include "utils.h" #include "stdio.h" #include <thrust/host_vector.h> #include "reference_calc.cpp" #define N_ITERATIONS 800 /* Background ========== The goal for this assignment is to take one image (the source) and paste it into another image (the destination) attempting to match the two images so that the pasting is non-obvious. This is known as a "seamless clone". The basic ideas are as follows: 1) Figure out the interior and border of the source image 2) Use the values of the border pixels in the destination image as boundary conditions for solving a Poisson equation that tells us how to blend the images. No pixels from the destination except pixels on the border are used to compute the match. Solving the Poisson Equation ============================ There are multiple ways to solve this equation - we choose an iterative method - specifically the Jacobi method. Iterative methods start with a guess of the solution and then iterate to try and improve the guess until it stops changing. If the problem was well-suited for the method then it will stop and where it stops will be the solution. The Jacobi method is the simplest iterative method and converges slowly - that is we need a lot of iterations to get to the answer, but it is the easiest method to write. Jacobi Iterations ================= Our initial guess is going to be the source image itself. This is a pretty good guess for what the blended image will look like and it means that we won't have to do as many iterations compared to if we had started far from the final solution. 
ImageGuess_prev (Floating point) ImageGuess_next (Floating point) DestinationImg SourceImg Follow these steps to implement one iteration: 1) For every pixel p in the interior, compute two sums over the four neighboring pixels: Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor] else if the neighbor in on the border then += DestinationImg[neighbor] Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) 2) Calculate the new pixel value: float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255] In this assignment we will do 800 iterations. */ __global__ void jacobi(const uchar4 *src, const uchar4 *dest, float *r1, float *r2, float*g1, float *g2, float* b1, float* b2, unsigned char *border, unsigned char *interior, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; // Column int y_i = threadIdx.y + blockIdx.y * blockDim.y; // Row // observe array bounds if(x_i > 0 && x_i < numCols-1){ if(y_i > 0 && y_i < numRows-1){ int i = y_i * numCols + x_i; // Row-ordered index in array if(border[i]){ r2[i] = dest[i].x; g2[i] = dest[i].y; b2[i] = dest[i].z; } // pixel must be in interior if(interior[i]){ // cue horrible workload imbalance... 
int neighbor_i; float rSum = 0; float gSum = 0; float bSum = 0; float num_neighbors = 0; // 1 neighbor_i = i - numCols; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 2 neighbor_i = i + numCols; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 3 neighbor_i = i - 1; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // 4 neighbor_i = i + 1; if(interior[neighbor_i]) { // add prev neighbors (A) rSum += r1[neighbor_i]; gSum += g1[neighbor_i]; bSum += b1[neighbor_i]; num_neighbors += 1; } else if (border[neighbor_i]) { // add dest val (B) rSum += dest[neighbor_i].x; gSum += dest[neighbor_i].y; bSum += dest[neighbor_i].z; num_neighbors += 1; } // sum += 
SourceImg[p] - SourceImg[neighbor] (for all four neighbors) // (C) rSum += src[i].x - src[neighbor_i].x; gSum += src[i].y - src[neighbor_i].y; bSum += src[i].z - src[neighbor_i].z; // divide by num neighbors rSum /= num_neighbors; gSum /= num_neighbors; bSum /= num_neighbors; //clamp to [0, 255] r2[i] = min(255.f, max(0.f, rSum)); g2[i] = min(255.f, max(0.f, gSum)); b2[i] = min(255.f, max(0.f, bSum)); } } } } // __device__ void guess_pixel(float *r1, float* r2, unsigned char *border, // unsigned char *interior, const size_t numCols){ // // } /* 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ __global__ void mask_source(const uchar4* const h_src, unsigned char* d_mask, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ // set to 1 if not white d_mask[i] = (h_src[i].x + h_src[i].y + h_src[i].z < 3 * 255) ? 1 : 0; } } /* Debug function. Create image of a mask. values 1 in mask get white in image */ __global__ void visualize_mask(unsigned char* d_mask, uchar4* d_out, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ if(d_mask[i]){ d_out[i].x = 255; d_out[i].y = 255; d_out[i].z = 255; } } } /* 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. 
*/ __global__ void mask_interior(unsigned char* mask, unsigned char* border, unsigned char* strictInterior, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; // Column int y_i = threadIdx.y + blockIdx.y * blockDim.y; // Row int i = y_i * numCols + x_i; // Row-ordered index in array // observe array bounds if(x_i > 0 && x_i < numCols-1){ if(y_i > 0 && y_i < numRows-1){ // pixel must be inside mask if(mask[i]){ // all neighbors must be in mask if(mask[(y_i - 1)* numCols + x_i] && mask[(y_i + 1) * numCols + x_i] && mask[y_i * numCols + x_i -1] && mask[y_i * numCols + x_i + 1]){ strictInterior[i] = 1; border[i] = 0; } else { strictInterior[i] = 0; border[i] = 1; } } else { strictInterior[i] = 0; border[i] = 0; } } } } /* 3) Separate out the incoming image into three separate channels TODO: remove? */ __global__ void separate_channels(const uchar4* d_src, unsigned char* d_red, unsigned char* d_green, unsigned char* d_blue, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; if (x_i < numCols && y_i < numRows){ int i = x_i + y_i * numCols; d_red[i] = d_src[i].x; d_green[i] = d_src[i].y; d_blue[i] = d_src[i].z; } } /* 3) Separate out the incoming image into three separate channels 4) Create two float buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. 
*/ __global__ void init_buffers(const uchar4* d_src, float* r1, float* r2, float* g1, float* g2, float* b1, float* b2, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; if (x_i < numCols && y_i < numRows){ int i = x_i + y_i * numCols; r1[i] = d_src[i].x; r2[i] = d_src[i].x; g1[i] = d_src[i].y; g2[i] = d_src[i].y; b1[i] = d_src[i].z; b2[i] = d_src[i].z; } } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. */ __global__ void combine_image(uchar4* d_out, unsigned char *interior, float *d_red, float *d_green, float *d_blue, const size_t numRows, const size_t numCols){ int x_i = threadIdx.x + blockIdx.x * blockDim.x; int y_i = threadIdx.y + blockIdx.y * blockDim.y; int i = x_i + y_i * numCols; if(i < numRows * numCols){ if(interior[i]){ d_out[i].x = (unsigned char) d_red[i]; d_out[i].y = (unsigned char) d_green[i]; d_out[i].z = (unsigned char) d_blue[i]; } } } void swap(float **a, float **b){ float* tmp = *b; *b = *a; *a = tmp; } void your_blend(const uchar4* const h_sourceImg, //IN const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg, //IN uchar4* const h_blendedImg) //OUT { // device variables unsigned char* d_src_mask, *d_border, *d_strictInterior; unsigned char *d_redSrc, *d_greenSrc, *d_blueSrc; float *d_red1, *d_red2, *d_green1, *d_green2, *d_blue1, *d_blue2; uchar4 *d_src, *d_dest, *d_mask_test; // computation vars const unsigned int size = numColsSource * numRowsSource; const unsigned int size_char = size * sizeof(unsigned char); const unsigned int size_float = size * sizeof(float); const dim3 threads(32, 32); const dim3 blocks(ceil((float)numColsSource/threads.x), ceil((float)numRowsSource/threads.y)); // declare memory 
checkCudaErrors(cudaMalloc(&d_src_mask, size_char)); checkCudaErrors(cudaMalloc(&d_border, size_char)); checkCudaErrors(cudaMalloc(&d_strictInterior, size_char)); checkCudaErrors(cudaMalloc(&d_redSrc, size_char)); checkCudaErrors(cudaMalloc(&d_greenSrc, size_char)); checkCudaErrors(cudaMalloc(&d_blueSrc, size_char)); // channel buffers checkCudaErrors(cudaMalloc(&d_red1, size_float)); checkCudaErrors(cudaMalloc(&d_red2, size_float)); checkCudaErrors(cudaMalloc(&d_green1, size_float)); checkCudaErrors(cudaMalloc(&d_green2, size_float)); checkCudaErrors(cudaMalloc(&d_blue1, size_float)); checkCudaErrors(cudaMalloc(&d_blue2, size_float)); // init masks checkCudaErrors(cudaMemset(d_src_mask, 0, size_char)); checkCudaErrors(cudaMemset(d_border, 0, size_char)); checkCudaErrors(cudaMemset(d_strictInterior, 0, size_char)); // images on device checkCudaErrors(cudaMalloc(&d_src, size * sizeof(uchar4))); checkCudaErrors(cudaMemcpy(d_src, h_sourceImg, size * sizeof(uchar4), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc(&d_dest, size * sizeof(uchar4))); checkCudaErrors(cudaMemcpy(d_dest, h_destImg, size * sizeof(uchar4), cudaMemcpyHostToDevice)); /* 1) Compute a mask of the pixels from the source image to be copied The pixels that shouldn't be copied are completely white, they have R=255, G=255, B=255. Any other pixels SHOULD be copied. */ mask_source<<<blocks, threads>>>(d_src, d_src_mask, numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* 2) Compute the interior and border regions of the mask. An interior pixel has all 4 neighbors also inside the mask. A border pixel is in the mask itself, but has at least one neighbor that isn't. */ mask_interior<<<blocks, threads>>>(d_src_mask, d_border, d_strictInterior, numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* 3) Separate out the incoming image into three separate channels TODO: remove and combine with 4? 
*/ // separate_channels<<<blocks, threads>>>(d_src, d_redSrc, d_greenSrc, // d_blueSrc, numRowsSource, numColsSource); /* 4) Create two float buffers for each color channel that will act as our guesses. Initialize them to the respective color channel of the source image since that will act as our intial guess. */ init_buffers<<<blocks, threads>>>(d_src, d_red1, d_red2, d_green1, d_green2, d_blue1, d_blue2, numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); /* 5) For each color channel perform the Jacobi iteration described above 800 times. */ for(int n=0; n<N_ITERATIONS; n++){ jacobi<<<blocks, threads>>>(d_src, d_dest, d_red1, d_red2, d_green1, d_green2, d_blue1, d_blue2, d_border, d_strictInterior, numRowsSource, numColsSource); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // swap pointers, new vals (2) become previous vals (1) swap(&d_red1, &d_red2); swap(&d_green1, &d_green2); swap(&d_blue1, &d_blue2); } /* 6) Create the output image by replacing all the interior pixels in the destination image with the result of the Jacobi iterations. Just cast the floating point values to unsigned chars since we have already made sure to clamp them to the correct range. 
*/ combine_image<<<blocks, threads>>>(d_dest, d_strictInterior, d_red2, d_green2, d_blue2, numRowsSource, numColsSource); checkCudaErrors(cudaMemcpy(h_blendedImg, d_dest, size * sizeof(uchar4), cudaMemcpyDeviceToHost)); // // DEBUG MASK // // checkCudaErrors(cudaMalloc(&d_mask_test, size * sizeof(uchar4))); // checkCudaErrors(cudaMemset(d_mask_test, 0, size * sizeof(uchar4))); // first arg can be: d_src_mask, d_border, d_strictInterior // visualize_mask<<<blocks, threads>>>(d_border, d_mask_test, numRowsSource, numColsSource); // checkCudaErrors(cudaMemcpy(h_blendedImg, d_mask_test, size * sizeof(uchar4), cudaMemcpyDeviceToHost)); // checkCudaErrors(cudaFree(d_mask_test)); // END DEBUG // free allocated memory checkCudaErrors(cudaFree(d_src_mask)); checkCudaErrors(cudaFree(d_src)); checkCudaErrors(cudaFree(d_dest)); checkCudaErrors(cudaFree(d_border)); checkCudaErrors(cudaFree(d_strictInterior)); checkCudaErrors(cudaFree(d_redSrc)); checkCudaErrors(cudaFree(d_greenSrc)); checkCudaErrors(cudaFree(d_blueSrc)); checkCudaErrors(cudaFree(d_red1)); checkCudaErrors(cudaFree(d_red2)); checkCudaErrors(cudaFree(d_green1)); checkCudaErrors(cudaFree(d_green2)); checkCudaErrors(cudaFree(d_blue1)); checkCudaErrors(cudaFree(d_blue2)); }
c922913c0b8e80d9e4ff6be50ad342a78cda7db7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernScanUp(int n, int i, int* dev_idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } int p = 1 << (i); int p2 = 1 << (i+1); if (index % p2 == 0) { dev_idata[index + p2 - 1] += dev_idata[index + p - 1]; } } __global__ void kernScanDown(int n, int i, int* dev_idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } int p = 1 << (i); int p2 = 1 << (i + 1); //index = index - 1; if (index % p2 == 0) { int t = dev_idata[index + p -1]; dev_idata[index + p - 1] = dev_idata[index + p2 - 1]; dev_idata[index + p2 - 1] += t; } } void kernScan(int m, int o, int* dev_idata) { dim3 fullBlocksPerGrid((o + blockSize - 1) / blockSize); for (int i = 0; i < m; i++) { kernScanUp << <fullBlocksPerGrid, blockSize >> >(o, i, dev_idata); } hipMemset(dev_idata+o - 1, 0, sizeof(int)); for (int i = m - 1; i > -1; i--) { kernScanDown << <fullBlocksPerGrid, blockSize >> >(o, i, dev_idata); } } __global__ void newkernScanUp(int n, int i, int* dev_idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } int p = 1 << (i); int p2 = 1 << (i + 1); dev_idata[index*p2 + p2 - 1] += dev_idata[index*p2 + p - 1]; } __global__ void newkernScanDown(int n, int i, int* dev_idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } int p = 1 << (i); int p2 = 1 << (i + 1); int t = dev_idata[index*p2 + p - 1]; dev_idata[index*p2 + p - 1] = dev_idata[index*p2 + p2 - 1]; dev_idata[index*p2 + p2 - 1] += t; } void newkernScan(int m, int o, int* dev_idata) { dim3 fullBlocksPerGrid;//((o + blockSize - 1) / blockSize); int p2; for (int i = 
0; i < m; i++) { p2 = 1 << (i + 1); fullBlocksPerGrid = ((o / p2 + blockSize - 1) / blockSize); newkernScanUp << <fullBlocksPerGrid, blockSize >> >(o / p2, i, dev_idata); } hipMemset(dev_idata + o - 1, 0, sizeof(int)); for (int i = m - 1; i > -1; i--) { p2 = 1 << (i + 1); fullBlocksPerGrid = ((o / p2 + blockSize - 1) / blockSize); newkernScanDown << <fullBlocksPerGrid, blockSize >> >(o / p2, i, dev_idata); } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO int *dev_idata, *idataall; int m = ilog2ceil(n); int o = 1 << (m); idataall = (int*)malloc(o * sizeof(int)); for (int i = 0; i < n; i++) { idataall[i] = idata[i]; } for (int i = n; i < o; i++) { idataall[i] = 0; } hipMalloc((void**)&dev_idata, o * sizeof(int)); checkCUDAError("hipMalloc dev_idata failed!"); hipMemcpy(dev_idata, idataall, o * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); newkernScan(m, o, dev_idata); timer().endGpuTimer(); hipMemcpy(odata, dev_idata, o * sizeof(int), hipMemcpyDeviceToHost); //exclusive->inclusive /*for (int i = 1; i < n; i++) { odata[i-1] = idataall[i]; } if (n != o) { odata[n - 1] = idataall[n]; } else { odata[n - 1] = idataall[n - 1] + idata[n - 1]; }*/ free(idataall); hipFree(dev_idata); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { // TODO int *dev_idata, *dev_bools, *dev_odata, *dev_indices, *idataall; int m = ilog2ceil(n); int o = 1 << (m); idataall = (int*)malloc(o * sizeof(int)); for (int i = 0; i < n; i++) { idataall[i] = idata[i]; } for (int i = n; i < o; i++) { idataall[i] = 0; } hipMalloc((void**)&dev_idata, o * sizeof(int)); checkCUDAError("hipMalloc dev_idata failed!"); hipMalloc((void**)&dev_bools, o * sizeof(int)); checkCUDAError("hipMalloc dev_bools failed!"); hipMalloc((void**)&dev_odata, o * sizeof(int)); checkCUDAError("hipMalloc dev_odata failed!"); hipMalloc((void**)&dev_indices, o * sizeof(int)); checkCUDAError("hipMalloc dev_indices failed!"); hipMemcpy(dev_idata, idataall, o * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); dim3 fullBlocksPerGrid((o + blockSize - 1) / blockSize); StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(o, dev_bools, dev_idata); hipMemcpy(dev_indices, dev_bools, o * sizeof(int), hipMemcpyDeviceToDevice); newkernScan(m, o, dev_indices); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(o, dev_odata, dev_idata, dev_bools, dev_indices); timer().endGpuTimer(); hipMemcpy(odata, dev_odata, o * sizeof(int), hipMemcpyDeviceToHost); int num = 0; for (int i = 0; i < n; i++) { if (odata[i]) { num++; } else { break; } } free(idataall); hipFree(dev_idata); hipFree(dev_bools); hipFree(dev_odata); hipFree(dev_indices); return num; //return -1; } } }
c922913c0b8e80d9e4ff6be50ad342a78cda7db7.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"

namespace StreamCompaction {
    namespace Efficient {
        using StreamCompaction::Common::PerformanceTimer;

        // Shared timer used by scan()/compact() to measure GPU-only time.
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        // Naive up-sweep step of the work-efficient scan: one thread per
        // element, filtered with a modulo test (most threads idle).
        // Kept for reference; superseded by newkernScanUp below.
        __global__ void kernScanUp(int n, int i, int* dev_idata) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= n) {
                return;
            }
            int p = 1 << (i);
            int p2 = 1 << (i + 1);
            if (index % p2 == 0) {
                dev_idata[index + p2 - 1] += dev_idata[index + p - 1];
            }
        }

        // Naive down-sweep step (same one-thread-per-element scheme).
        __global__ void kernScanDown(int n, int i, int* dev_idata) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= n) {
                return;
            }
            int p = 1 << (i);
            int p2 = 1 << (i + 1);
            if (index % p2 == 0) {
                int t = dev_idata[index + p - 1];
                dev_idata[index + p - 1] = dev_idata[index + p2 - 1];
                dev_idata[index + p2 - 1] += t;
            }
        }

        // In-place exclusive scan over o (= 2^m) elements using the naive
        // kernels above.  o must be a power of two.
        void kernScan(int m, int o, int* dev_idata) {
            dim3 fullBlocksPerGrid((o + blockSize - 1) / blockSize);
            for (int i = 0; i < m; i++) {
                kernScanUp<<<fullBlocksPerGrid, blockSize>>>(o, i, dev_idata);
            }
            // Exclusive scan: root of the up-sweep tree is replaced by 0.
            cudaMemset(dev_idata + o - 1, 0, sizeof(int));
            for (int i = m - 1; i > -1; i--) {
                kernScanDown<<<fullBlocksPerGrid, blockSize>>>(o, i, dev_idata);
            }
        }

        // Compacted up-sweep: thread `index` owns the element at
        // index*p2 + p2 - 1, so exactly o/p2 threads are launched per level
        // and no modulo filtering is needed.
        __global__ void newkernScanUp(int n, int i, int* dev_idata) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= n) {
                return;
            }
            int p = 1 << (i);
            int p2 = 1 << (i + 1);
            dev_idata[index * p2 + p2 - 1] += dev_idata[index * p2 + p - 1];
        }

        // Compacted down-sweep (mirror of newkernScanUp).
        __global__ void newkernScanDown(int n, int i, int* dev_idata) {
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index >= n) {
                return;
            }
            int p = 1 << (i);
            int p2 = 1 << (i + 1);
            int t = dev_idata[index * p2 + p - 1];
            dev_idata[index * p2 + p - 1] = dev_idata[index * p2 + p2 - 1];
            dev_idata[index * p2 + p2 - 1] += t;
        }

        // In-place exclusive scan over o (= 2^m) elements, launching only as
        // many threads as each level actually needs.
        void newkernScan(int m, int o, int* dev_idata) {
            dim3 fullBlocksPerGrid;
            int p2;
            for (int i = 0; i < m; i++) {
                p2 = 1 << (i + 1);
                fullBlocksPerGrid = ((o / p2 + blockSize - 1) / blockSize);
                newkernScanUp<<<fullBlocksPerGrid, blockSize>>>(o / p2, i, dev_idata);
            }
            cudaMemset(dev_idata + o - 1, 0, sizeof(int));
            for (int i = m - 1; i > -1; i--) {
                p2 = 1 << (i + 1);
                fullBlocksPerGrid = ((o / p2 + blockSize - 1) / blockSize);
                newkernScanDown<<<fullBlocksPerGrid, blockSize>>>(o / p2, i, dev_idata);
            }
        }

        /**
         * Performs exclusive prefix-sum (aka scan) on idata, storing the
         * result into odata.  The input is padded with zeros up to the next
         * power of two so the balanced-tree scan can run on it.
         */
        void scan(int n, int *odata, const int *idata) {
            int *dev_idata, *idataall;
            int m = ilog2ceil(n);
            int o = 1 << (m);           // padded power-of-two length, o >= n
            idataall = (int*)malloc(o * sizeof(int));
            for (int i = 0; i < n; i++) {
                idataall[i] = idata[i];
            }
            for (int i = n; i < o; i++) {
                idataall[i] = 0;        // zero padding does not alter the scan of the first n entries
            }
            cudaMalloc((void**)&dev_idata, o * sizeof(int));
            checkCUDAError("cudaMalloc dev_idata failed!");
            cudaMemcpy(dev_idata, idataall, o * sizeof(int), cudaMemcpyHostToDevice);
            timer().startGpuTimer();
            newkernScan(m, o, dev_idata);
            timer().endGpuTimer();
            // Fix: copy only n results back -- odata is an n-element caller
            // buffer; copying o elements overran it whenever o > n.
            cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
            free(idataall);
            cudaFree(dev_idata);
        }

        /**
         * Performs stream compaction on idata, storing the result into odata.
         * All zeroes are discarded.
         *
         * @param n      The number of elements in idata.
         * @param odata  The array into which to store elements.
         * @param idata  The array of elements to compact.
         * @returns The number of elements remaining after compaction.
         */
        int compact(int n, int *odata, const int *idata) {
            int *dev_idata, *dev_bools, *dev_odata, *dev_indices, *idataall;
            int m = ilog2ceil(n);
            int o = 1 << (m);           // padded power-of-two length
            idataall = (int*)malloc(o * sizeof(int));
            for (int i = 0; i < n; i++) {
                idataall[i] = idata[i];
            }
            for (int i = n; i < o; i++) {
                idataall[i] = 0;        // padding maps to bool 0, so it never scatters
            }
            cudaMalloc((void**)&dev_idata, o * sizeof(int));
            checkCUDAError("cudaMalloc dev_idata failed!");
            cudaMalloc((void**)&dev_bools, o * sizeof(int));
            checkCUDAError("cudaMalloc dev_bools failed!");
            cudaMalloc((void**)&dev_odata, o * sizeof(int));
            checkCUDAError("cudaMalloc dev_odata failed!");
            cudaMalloc((void**)&dev_indices, o * sizeof(int));
            checkCUDAError("cudaMalloc dev_indices failed!");
            cudaMemcpy(dev_idata, idataall, o * sizeof(int), cudaMemcpyHostToDevice);
            timer().startGpuTimer();
            dim3 fullBlocksPerGrid((o + blockSize - 1) / blockSize);
            StreamCompaction::Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(o, dev_bools, dev_idata);
            cudaMemcpy(dev_indices, dev_bools, o * sizeof(int), cudaMemcpyDeviceToDevice);
            newkernScan(m, o, dev_indices);
            StreamCompaction::Common::kernScatter<<<fullBlocksPerGrid, blockSize>>>(o, dev_odata, dev_idata, dev_bools, dev_indices);
            timer().endGpuTimer();
            // Fix: the element count is (last exclusive-scan value + last
            // flag).  The old code walked odata until the first zero, but
            // dev_odata past the compacted region is uninitialized memory,
            // so that walk could read garbage and miscount.
            int lastIndex = 0, lastBool = 0;
            cudaMemcpy(&lastIndex, dev_indices + o - 1, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(&lastBool, dev_bools + o - 1, sizeof(int), cudaMemcpyDeviceToHost);
            int num = lastIndex + lastBool;
            // Fix: copy only num elements -- odata is a caller buffer of n
            // elements; copying o elements overran it whenever o > n.
            cudaMemcpy(odata, dev_odata, num * sizeof(int), cudaMemcpyDeviceToHost);
            free(idataall);
            cudaFree(dev_idata);
            cudaFree(dev_bools);
            cudaFree(dev_odata);
            cudaFree(dev_indices);
            return num;
        }
    }
}
5e39fc2eb6325db554e6eec30b3b71efaca69ec0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/device_functions.h> #include "device_launch_parameters.h" #include<math.h> #include <stdio.h> #include<time.h> #include <iostream> #include <stdlib.h> #include "GpuTimer.h" using namespace std; #define BLOCK_SIZE 16 #define TILE_WIDTH BLOCK_SIZE //since the tile is of BLOCK_SIZE elements in each direction //Compute C=A*B // Serial implementation for running on CPU using a single thread. void MatrixMultiplyCpu(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { for (int i = 0; i < numARows; i++) { for (int j = 0; j < numBColumns; j++) { float Cvalue = 0; for (int k = 0; k < numAColumns; k++) { Cvalue += A[i*numAColumns + k] * B[k*numBColumns + j]; } C[i*numCColumns + j] = Cvalue; } } } //GPU Kernel for Tiled Matrix Multiplication __global__ void TiledMatrixMultiply(float* A, float* B, float* C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; int Row = blockIdx.y * blockDim.y + threadIdx.y; //calculate row index int Col = blockIdx.x * blockDim.x + threadIdx.x; //calculate column index int n = numAColumns - 1; float Cvalue = 0; for (int p = 0; p < n / TILE_WIDTH + 1; ++p) { // where p is the phase if (p * TILE_WIDTH + threadIdx.x < numAColumns && Row < numARows) ds_A[threadIdx.y][threadIdx.x] = A[Row*numAColumns + p*TILE_WIDTH + threadIdx.x]; else ds_A[threadIdx.y][threadIdx.x] = 0.0; if (p * TILE_WIDTH + threadIdx.y < numBColumns && Col < numBColumns) ds_B[threadIdx.y][threadIdx.x] = B[(p*TILE_WIDTH + threadIdx.y)*numBColumns + Col]; else ds_B[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); if (Row < numARows && Col < numBColumns) for (int k = 0; k < TILE_WIDTH; ++k) { Cvalue += ds_A[threadIdx.y][k] * ds_B[k][threadIdx.x]; __syncthreads(); } } if (Row < 
numCRows && Col < numCColumns) C[Row*numCColumns + Col] = Cvalue; } int main(void) { hipError_t err = hipSuccess; int numARows = 960; // number of rows in the matrix A int numAColumns = 640; // number of columns in the matrix A int numBRows = 640; // number of rows in the matrix B int numBColumns = 800; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) //@@ Insert Your Code Here to Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //Allocate the host memory for the input and output matrices float *h_A = (float *)malloc(sizeof(float)*numARows*numAColumns); float *h_B = (float *)malloc(sizeof(float)*numBRows*numBColumns); float *h_C = (float *)malloc(sizeof(float)*numCRows*numCColumns); float *h_C_CPU = (float *)malloc(sizeof(float)*numCRows*numCColumns); //Random Initialize Matrix A. //There are several ways to do this, such as making functions for manual input or using random numbers. 
//In this case, we simply use a for loop to fill the cells with trigonometric values of the indices: // Set the Seed for the random number generator rand() //srand(clock()); for (int i = 0; i<numARows; i++) { for (int j = 0; j<numAColumns; j++) { //h_A[i*numAColumns+j]=(float)rand() /(float)(RAND_MAX)*4.0; h_A[i*numAColumns + j] = sin(i); } } //Initialize Matrix B for (int i = 0; i<numBRows; i++) { for (int j = 0; j<numBColumns; j++) { //h_B[i*numBColumns+j]=(float)rand() /(float)(RAND_MAX) *4.0; h_B[i*numBColumns + j] = cos(j); } } //Allocate memory on the device for input and output matrices and record the needed time float *d_A, *d_B, *d_C; GpuTimer timer; timer.Start(); //@@Insert Your Code Here to allocate memory for d_A, d_B, d_C float sizeA = numARows * numAColumns * sizeof(float); float sizeB = numBRows * numBColumns * sizeof(float); float sizeC = numCRows * numCColumns * sizeof(float); err = hipMalloc((void **)&d_A, sizeA * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_B, sizeB * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_C, sizeC * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } timer.Stop(); printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed()); //Copy the input matrices A and B from the host to the device and record the needed time GpuTimer timer1; timer1.Start(); //@@ Insert Your Code Here to copy matrices A and B from Host to Device hipMemcpy(d_A, h_A, sizeA, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizeB, hipMemcpyHostToDevice); timer1.Stop(); printf("Time to copy the Matrix from the host to the device is: %f msecs.\n", timer1.Elapsed()); //Do the 
Processing on the GPU //@@ Insert Kernel Execution Configuration Parameters dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 gridDim((numCColumns - 1) / BLOCK_SIZE + 1, (numCRows - 1) / BLOCK_SIZE + 1, 1); //Invoke the TiledMatrixMultiply kernel and record the needed time for its execution GpuTimer timer2; timer2.Start(); //@@ Insert Your Code Here for Kernel Invocation TiledMatrixMultiply << < gridDim, dimBlock >> > (d_A, d_B, d_C, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); timer2.Stop(); printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed()); //Copy resulting matrix from device to host and record the needed time GpuTimer timer3; timer3.Start(); //@@ Insert Your Code Here to Copy the resulting Matrix d_C from device to the Host h_C hipMemcpy(h_C, d_C, sizeC, hipMemcpyDeviceToHost); timer3.Stop(); printf("Time to copy the resulting Matrix from the device to the host is: %f msecs.\n", timer3.Elapsed()); //Do the Processing on the CPU clock_t begin = clock(); //@@ Insert Your Code Here to call the CPU function MatrixMultiplyCpu where the resulting matrix is h_C_CPU MatrixMultiplyCpu(h_A, h_B, h_C_CPU, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000; printf("Implemented CPU serial code ran in: %f msecs.\n", time_spent); //Verify Results Computed by GPU and CPU for (int i = 0; i<numCRows; i++) { for (int j = 0; j<numCColumns; j++) { if (fabs(h_C_CPU[i*numCColumns + j] - h_C[i*numCColumns + j]) > 1e-2) { fprintf(stderr, "Result verification failed at element (%d,%d)!\n", i, j); exit(EXIT_FAILURE); } } } printf("Test PASSED\n"); //Free host memory free(h_A); free(h_B); free(h_C); free(h_C_CPU); //Free device memory //@@ Insert Your Code Here to Free Device Memory free(d_A); free(d_B); free(d_C); return 0; }
5e39fc2eb6325db554e6eec30b3b71efaca69ec0.cu
#include "cuda_runtime.h"
#include <device_functions.h>
#include "device_launch_parameters.h"
#include<math.h>
#include <stdio.h>
#include<time.h>
#include <iostream>
#include <stdlib.h>
#include "GpuTimer.h"
using namespace std;

#define BLOCK_SIZE 16
#define TILE_WIDTH BLOCK_SIZE //since the tile is of BLOCK_SIZE elements in each direction

//Compute C=A*B
// Serial implementation for running on CPU using a single thread.
// Reference result used to verify the GPU kernel.
void MatrixMultiplyCpu(float* A, float* B, float* C, int numARows, int numAColumns,
                       int numBRows, int numBColumns, int numCRows, int numCColumns)
{
    for (int i = 0; i < numARows; i++) {
        for (int j = 0; j < numBColumns; j++) {
            float Cvalue = 0;
            for (int k = 0; k < numAColumns; k++) {
                Cvalue += A[i*numAColumns + k] * B[k*numBColumns + j];
            }
            C[i*numCColumns + j] = Cvalue;
        }
    }
}

//GPU Kernel for Tiled Matrix Multiplication
// Expects a 2D launch with blockDim = (TILE_WIDTH, TILE_WIDTH).
// Out-of-range tile slots are zero-filled, so the accumulation loop can run
// unconditionally and every __syncthreads() is reached by the whole block.
__global__ void TiledMatrixMultiply(float* A, float* B, float* C,
                                    int numARows, int numAColumns,
                                    int numBRows, int numBColumns,
                                    int numCRows, int numCColumns)
{
    __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];

    int Row = blockIdx.y * blockDim.y + threadIdx.y; //calculate row index
    int Col = blockIdx.x * blockDim.x + threadIdx.x; //calculate column index

    float Cvalue = 0.0f;
    int numPhases = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; // ceil-div over the shared dimension
    for (int p = 0; p < numPhases; ++p) {
        // Cooperative tile loads, zero-padded outside the matrices.
        if (p * TILE_WIDTH + threadIdx.x < numAColumns && Row < numARows)
            ds_A[threadIdx.y][threadIdx.x] = A[Row*numAColumns + p*TILE_WIDTH + threadIdx.x];
        else
            ds_A[threadIdx.y][threadIdx.x] = 0.0f;
        // Fix: the row guard for B must compare against numBRows (the old
        // code compared against numBColumns, reading past B's last row
        // whenever numBColumns > numBRows).
        if (p * TILE_WIDTH + threadIdx.y < numBRows && Col < numBColumns)
            ds_B[threadIdx.y][threadIdx.x] = B[(p*TILE_WIDTH + threadIdx.y)*numBColumns + Col];
        else
            ds_B[threadIdx.y][threadIdx.x] = 0.0f;
        __syncthreads();

        // Fix: the old code ran this loop (with a __syncthreads() inside it)
        // under a divergent Row/Col guard -- a barrier in divergent control
        // flow is undefined behavior.  The zero padding makes the extra
        // products harmless, so every thread accumulates unconditionally.
        for (int k = 0; k < TILE_WIDTH; ++k)
            Cvalue += ds_A[threadIdx.y][k] * ds_B[k][threadIdx.x];
        __syncthreads(); // tiles may not be overwritten until all reads finish
    }

    if (Row < numCRows && Col < numCColumns)
        C[Row*numCColumns + Col] = Cvalue;
}

// Host driver: allocates matrices, runs the tiled kernel and the CPU
// reference, times each stage, and verifies the results agree.
int main(void)
{
    cudaError_t err = cudaSuccess;
    int numARows = 960; // number of rows in the matrix A
    int numAColumns = 640; // number of columns in the matrix A
    int numBRows = 640; // number of rows in the matrix B
    int numBColumns = 800; // number of columns in the matrix B
    int numCRows; // number of rows in the matrix C (you have to set this)
    int numCColumns; // number of columns in the matrix C (you have to set this)
    //@@ Insert Your Code Here to Set numCRows and numCColumns
    numCRows = numARows;
    numCColumns = numBColumns;

    //Allocate the host memory for the input and output matrices
    float *h_A = (float *)malloc(sizeof(float)*numARows*numAColumns);
    float *h_B = (float *)malloc(sizeof(float)*numBRows*numBColumns);
    float *h_C = (float *)malloc(sizeof(float)*numCRows*numCColumns);
    float *h_C_CPU = (float *)malloc(sizeof(float)*numCRows*numCColumns);

    //Initialize Matrix A with trigonometric values of the row index.
    for (int i = 0; i < numARows; i++) {
        for (int j = 0; j < numAColumns; j++) {
            h_A[i*numAColumns + j] = sin(i);
        }
    }
    //Initialize Matrix B with trigonometric values of the column index.
    for (int i = 0; i < numBRows; i++) {
        for (int j = 0; j < numBColumns; j++) {
            h_B[i*numBColumns + j] = cos(j);
        }
    }

    //Allocate memory on the device for input and output matrices and record the needed time
    float *d_A, *d_B, *d_C;
    GpuTimer timer;
    timer.Start();
    // Buffer sizes in BYTES.  Fix: these were previously `float` and were
    // multiplied by sizeof(float) a second time at the cudaMalloc calls,
    // over-allocating 4x (and risking precision loss in the float).
    size_t sizeA = (size_t)numARows * numAColumns * sizeof(float);
    size_t sizeB = (size_t)numBRows * numBColumns * sizeof(float);
    size_t sizeC = (size_t)numCRows * numCColumns * sizeof(float);
    err = cudaMalloc((void **)&d_A, sizeA);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&d_B, sizeB);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&d_C, sizeC);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    timer.Stop();
    printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed());

    //Copy the input matrices A and B from the host to the device and record the needed time
    GpuTimer timer1;
    timer1.Start();
    cudaMemcpy(d_A, h_A, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeB, cudaMemcpyHostToDevice);
    timer1.Stop();
    printf("Time to copy the Matrix from the host to the device is: %f msecs.\n", timer1.Elapsed());

    //Do the Processing on the GPU
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 gridDim((numCColumns - 1) / BLOCK_SIZE + 1, (numCRows - 1) / BLOCK_SIZE + 1, 1);

    //Invoke the TiledMatrixMultiply kernel and record the needed time for its execution
    GpuTimer timer2;
    timer2.Start();
    TiledMatrixMultiply << < gridDim, dimBlock >> > (d_A, d_B, d_C, numARows, numAColumns,
                                                     numBRows, numBColumns, numCRows, numCColumns);
    // Fix: kernel launches were previously unchecked; surface launch errors here.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch TiledMatrixMultiply (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    timer2.Stop();
    printf("Implemented CUDA code ran in: %f msecs.\n", timer2.Elapsed());

    //Copy resulting matrix from device to host and record the needed time
    GpuTimer timer3;
    timer3.Start();
    cudaMemcpy(h_C, d_C, sizeC, cudaMemcpyDeviceToHost); // blocking copy also synchronizes the kernel
    timer3.Stop();
    printf("Time to copy the resulting Matrix from the device to the host is: %f msecs.\n", timer3.Elapsed());

    //Do the Processing on the CPU
    clock_t begin = clock();
    MatrixMultiplyCpu(h_A, h_B, h_C_CPU, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
    printf("Implemented CPU serial code ran in: %f msecs.\n", time_spent);

    //Verify Results Computed by GPU and CPU
    for (int i = 0; i < numCRows; i++) {
        for (int j = 0; j < numCColumns; j++) {
            if (fabs(h_C_CPU[i*numCColumns + j] - h_C[i*numCColumns + j]) > 1e-2) {
                fprintf(stderr, "Result verification failed at element (%d,%d)!\n", i, j);
                exit(EXIT_FAILURE);
            }
        }
    }
    printf("Test PASSED\n");

    //Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_CPU);

    //Free device memory
    // Fix: device pointers must be released with cudaFree; the old code
    // called the host free() on them (undefined behavior + device leak).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
dd371ade91c56882a58efabbf43a5734dd6bbc71.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * @copyright (c) 2012- King Abdullah University of Science and
 *                      Technology (KAUST). All rights reserved.
 **/
/**
 * @file src/blas_l2/sgemv2.cu
 * KBLAS is a high performance CUDA library for subset of BLAS
 * and LAPACK routines optimized for NVIDIA GPUs.
 * KBLAS is provided by KAUST.
 *
 * @version 2.0.0
 * @author Ahmad Abdelfattah
 * @date 2017-11-13
 **/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv2_core.cuh"

// Kernel tuning parameters, selected by compute-capability macro SM
// (presumably defined by the build system -- TODO confirm).
// *_nb: rows per thread block; *_ntcol: thread columns per block;
// *_ept: elements per thread; *_width: columns processed per block;
// *_by: grid height (y dimension).
#if(SM >= 30)
#define sgemvn_nb       (32)
#define sgemvn_ntcol    (4)
#define sgemvn_ept      (4)
#define sgemvn_width    (sgemvn_ntcol*sgemvn_ept)
#define sgemvn_by       (8)

#define sgemvt_nb       (32)
#define sgemvt_ntcol    (4)
#define sgemvt_ept      (8)
#define sgemvt_width    (sgemvt_ntcol*sgemvt_ept)
#define sgemvt_by       (4)
#else
#define sgemvn_nb       (64)
#define sgemvn_ntcol    (8)
#define sgemvn_ept      (2)
#define sgemvn_width    (sgemvn_ntcol*sgemvn_ept)
#define sgemvn_by       (1)

#define sgemvt_nb       (64)
#define sgemvt_ntcol    (8)
#define sgemvt_ept      (2)
#define sgemvt_width    (sgemvt_ntcol*sgemvt_ept)
#define sgemvt_by       (1)
#endif

// Scales an n-element strided vector by alpha on the given stream
// (defined elsewhere in KBLAS).
extern "C" int kblas_sscal_async(int n, float alpha, float *x, int incx, hipStream_t stream);

// Single-precision GEMV driver: y = alpha*op(A)*x + beta*y, where op is
// selected by `trans` ('n'/'N', 't'/'T', 'c'/'C').  beta-scaling is done
// first by kblas_sscal_async, then the gemvn/gemvt kernel (from
// gemv2_core.cuh) adds alpha*op(A)*x.  The switch dispatches on the size
// of the irregular column remainder (ept_), which is a compile-time
// template parameter of the kernels.  Returns 0 on success, -1 on an
// unrecognized `trans`; exits the process on an unsupported remainder.
int kblas_sgemv2_driver( char trans, int rows, int cols,
                        float alpha, float *dA, int lda,
                        float *dX, int incx,
                        float beta, float *dY, int incy,
                        hipStream_t stream)
{
    if(trans == 'n' || trans == 'N')
    {
        // scaling with beta
        kblas_sscal_async(rows, beta, dY, incy, stream);

        int mod_r = rows % sgemvn_nb;       // irregular row remainder
        int mod_c = cols % sgemvn_width;    // irregular column remainder

        int blocks = rows/sgemvn_nb;
        if(mod_r != 0) blocks += 1;

        const int thread_x = sgemvn_nb;
        const int thread_y = sgemvn_ntcol;
        const int ept = sgemvn_ept;

        int threshold = mod_c / ept;        // full ept-chunks inside the remainder
        int ept_ = mod_c % ept;             // leftover elements -> template parameter

        dim3 dimBlock(thread_x, thread_y);
        dim3 dimGrid(blocks, sgemvn_by);
        switch(ept_)
        {
            case 0:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 1:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 2:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 3:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 4:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 5:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 6:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 7:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            case 8:hipLaunchKernelGGL(( gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break;
            default: printf("irregular part %d is not supported, please extend the case statement of sgemv\n", ept_); exit(1);
        }
    }    // end of non-transpose case
    else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
    {
        // scaling with beta
        kblas_sscal_async(cols, beta, dY, incy, stream);

        int mod_r = rows % sgemvt_nb;
        int mod_c = cols % sgemvt_width;

        // Transpose case: output has `cols` elements, so blocks span columns.
        int blocks = cols/sgemvt_width;
        if(mod_c != 0) blocks += 1;

        const int thread_x = sgemvt_nb;
        const int thread_y = sgemvt_ntcol;
        const int ept = sgemvt_ept;

        int threshold = mod_c / ept;
        int ept_ = mod_c % ept;

        dim3 dimBlock(thread_x, thread_y);
        dim3 dimGrid(blocks, sgemvt_by);

        // conj selects conjugate-transpose inside gemvt (no-op for real floats).
        int conj;
        if(trans == 'c' || trans == 'C')conj = 1;
        else conj = 0;
        //printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_);
        switch(ept_)
        {
            case 0:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 1:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 2:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 3:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 4:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 5:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 6:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 7:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            case 8:hipLaunchKernelGGL(( gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break;
            default: printf("irregular part %d is not supported, please extend the case statement of sgemv\n", ept_); exit(1);
        }
    }
    else
    {
        printf("SGEMV error: Unrecognized transpose mode %c \n", trans);
        return -1;
    }

    return 0;
}

// Synchronous-style entry point: runs the driver on the default stream (0).
extern "C"
int kblas_sgemv2(char trans, int rows, int cols,
                float alpha, float *dA, int lda,
                float *dX, int incx,
                float beta, float *dY, int incy)
{
    return kblas_sgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}

// Asynchronous entry point: runs the driver on a caller-supplied stream.
extern "C"
int kblas_sgemv2_async( char trans, int rows, int cols,
                        float alpha, float *dA, int lda,
                        float *dX, int incx,
                        float beta, float *dY, int incy,
                        hipStream_t stream)
{
    return kblas_sgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
}
dd371ade91c56882a58efabbf43a5734dd6bbc71.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/sgemv2.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 2.0.0 * @author Ahmad Abdelfattah * @date 2017-11-13 **/ #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cublas.h> #include "gemv2_core.cuh" #if(SM >= 30) #define sgemvn_nb (32) #define sgemvn_ntcol (4) #define sgemvn_ept (4) #define sgemvn_width (sgemvn_ntcol*sgemvn_ept) #define sgemvn_by (8) #define sgemvt_nb (32) #define sgemvt_ntcol (4) #define sgemvt_ept (8) #define sgemvt_width (sgemvt_ntcol*sgemvt_ept) #define sgemvt_by (4) #else #define sgemvn_nb (64) #define sgemvn_ntcol (8) #define sgemvn_ept (2) #define sgemvn_width (sgemvn_ntcol*sgemvn_ept) #define sgemvn_by (1) #define sgemvt_nb (64) #define sgemvt_ntcol (8) #define sgemvt_ept (2) #define sgemvt_width (sgemvt_ntcol*sgemvt_ept) #define sgemvt_by (1) #endif extern "C" int kblas_sscal_async(int n, float alpha, float *x, int incx, cudaStream_t stream); int kblas_sgemv2_driver( char trans, int rows, int cols, float alpha, float *dA, int lda, float *dX, int incx, float beta, float *dY, int incy, cudaStream_t stream) { if(trans == 'n' || trans == 'N') { // scaling with beta kblas_sscal_async(rows, beta, dY, incy, stream); int mod_r = rows % sgemvn_nb; int mod_c = cols % sgemvn_width; int blocks = rows/sgemvn_nb; if(mod_r != 0) blocks += 1; const int thread_x = sgemvn_nb; const int thread_y = sgemvn_ntcol; const int ept = sgemvn_ept; int threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, sgemvn_by); switch(ept_) { case 0: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 1: gemvn<float, sgemvn_nb, 
sgemvn_ntcol, ept, sgemvn_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 2: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 3: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 4: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 5: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 6: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 7: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; case 8: gemvn<float, sgemvn_nb, sgemvn_ntcol, ept, sgemvn_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold); break; default: printf("irregular part %d is not supported, please extend the case statement of sgemv\n", ept_); exit(1); } } // end of non-transpose case else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C') { // scaling with beta kblas_sscal_async(cols, beta, dY, incy, stream); int mod_r = rows % sgemvt_nb; int mod_c = cols % sgemvt_width; int blocks = cols/sgemvt_width; if(mod_c != 0) blocks += 1; const int thread_x = sgemvt_nb; const int thread_y = sgemvt_ntcol; const int ept = sgemvt_ept; int 
threshold = mod_c / ept; int ept_ = mod_c % ept; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks, sgemvt_by); int conj; if(trans == 'c' || trans == 'C')conj = 1; else conj = 0; //printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_); switch(ept_) { case 0: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 1: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 2: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 3: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 4: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 5: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 6: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 7: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj); break; case 8: gemvt<float, sgemvt_nb, sgemvt_ntcol, ept, sgemvt_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, 
dY, incy, mod_r, mod_c, threshold, conj); break; default: printf("irregular part %d is not supported, please extend the case statement of sgemv\n", ept_); exit(1); } } else { printf("SGEMV error: Unrecognized transpose mode %c \n", trans); return -1; } return 0; } extern "C" int kblas_sgemv2(char trans, int rows, int cols, float alpha, float *dA, int lda, float *dX, int incx, float beta, float *dY, int incy) { return kblas_sgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0); } extern "C" int kblas_sgemv2_async( char trans, int rows, int cols, float alpha, float *dA, int lda, float *dX, int incx, float beta, float *dY, int incy, cudaStream_t stream) { return kblas_sgemv2_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream); }
2abaccf3eb8ebf5aeff52c3989e1506109924739.hip
// !!! This is a file automatically generated by hipify!!! /* This file contains routines for Parallel vector operations. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/ #include <petsc/private/cudavecimpl.h> /*MC VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise. Options Database Keys: . -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDestroy_MPICUDA(Vec v) { Vec_MPI *vecmpi = (Vec_MPI*)v->data; Vec_CUDA *veccuda; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; if (v->spptr) { veccuda = (Vec_CUDA*)v->spptr; if (veccuda->GPUarray_allocated) { err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err); veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err); } if (v->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr); ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); v->pinned_memory = PETSC_FALSE; } ierr = PetscFree(v->spptr);CHKERRQ(ierr); } ierr = VecDestroy_MPI(v);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z) { PetscReal sum,work = 0.0; PetscErrorCode ierr; PetscFunctionBegin; if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecNorm_SeqCUDA(xin,NORM_2,&work); work *= work; ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = PetscSqrtReal(sum); } else if (type == NORM_1) { /* Find the local part */ ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr); /* Find the global max */ 
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { /* Find the local max */ ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { PetscReal temp[2]; ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr); temp[1] = temp[1]*temp[1]; ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); z[1] = PetscSqrtReal(z[1]); } PetscFunctionReturn(0); } PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z) { PetscScalar awork[128],*work = awork; PetscErrorCode ierr; PetscFunctionBegin; if (nv > 128) { ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr); } ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr); ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); if (nv > 128) { ierr = PetscFree(work);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA Options Database Keys: . 
-vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v) { PetscErrorCode ierr; Vec_MPI *vw,*w = (Vec_MPI*)win->data; PetscScalar *array; PetscFunctionBegin; ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr); vw = (Vec_MPI*)(*v)->data; ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr); /* save local representation of the parallel vector (and scatter) if it exists */ if (w->localrep) { ierr = VecGetArray(*v,&array);CHKERRQ(ierr); ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr); ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr); ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr); vw->localupdate = w->localupdate; if (vw->localupdate) { ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr); } } /* New vector should inherit stashing property of parent */ (*v)->stash.donotstash = win->stash.donotstash; (*v)->stash.ignorenegidx = win->stash.ignorenegidx; /* change type_name appropriately */ ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr); (*v)->map->bs = PetscAbs(win->map->bs); (*v)->bstash.bs = win->bstash.bs; PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_MPICUDA(Vec 
s,Vec t,PetscScalar *dp,PetscScalar *nm) { PetscErrorCode ierr; PetscScalar work[2],sum[2]; PetscFunctionBegin; ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr); *dp = sum[0]; *nm = sum[1]; PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA(Vec vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr); ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; PetscFunctionReturn(0); } PetscErrorCode VecCreate_CUDA(Vec v) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr); if (size == 1) { ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr); } else { ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArray - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - array - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. If the user-provided array is NULL, then VecCUDAPlaceArray() can be used at a later stage to SET the array for storing the vector values. PETSc does NOT free the array when the vector is destroyed via VecDestroy(). 
The user should not free the array until the vector is destroyed. Level: intermediate .seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray() @*/ PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector"); ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = VecCreate(comm,vv);CHKERRQ(ierr); ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr); ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - cpuarray - the user provided CPU array to store the vector values - gpuarray - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: If both cpuarray and gpuarray are provided, the caller must ensure that the provided arrays have identical values. Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. PETSc does NOT free the provided arrays when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. 
Level: intermediate .seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(), VecCUDAAllocateCheckHost() @*/ PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr); if (cpuarray && gpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_BOTH; } else if (cpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_CPU; } else if (gpuarray) { (*vv)->offloadmask = PETSC_OFFLOAD_GPU; } else { (*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } PetscFunctionReturn(0); } PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin) { PetscErrorCode ierr; PetscFunctionBegin; V->boundtocpu = pin; if (pin) { ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr); V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */ V->ops->dotnorm2 = NULL; V->ops->waxpy = VecWAXPY_Seq; V->ops->dot = VecDot_MPI; V->ops->mdot = VecMDot_MPI; V->ops->tdot = VecTDot_MPI; V->ops->norm = VecNorm_MPI; V->ops->scale = VecScale_Seq; V->ops->copy = VecCopy_Seq; V->ops->set = VecSet_Seq; V->ops->swap = VecSwap_Seq; V->ops->axpy = VecAXPY_Seq; V->ops->axpby = VecAXPBY_Seq; V->ops->maxpy = VecMAXPY_Seq; V->ops->aypx = VecAYPX_Seq; V->ops->axpbypcz = VecAXPBYPCZ_Seq; V->ops->pointwisemult = VecPointwiseMult_Seq; V->ops->setrandom = VecSetRandom_Seq; V->ops->placearray = VecPlaceArray_Seq; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_Seq; V->ops->dot_local = VecDot_Seq; V->ops->tdot_local = VecTDot_Seq; V->ops->norm_local = 
VecNorm_Seq; V->ops->mdot_local = VecMDot_Seq; V->ops->pointwisedivide = VecPointwiseDivide_Seq; V->ops->getlocalvector = NULL; V->ops->restorelocalvector = NULL; V->ops->getlocalvectorread = NULL; V->ops->restorelocalvectorread = NULL; V->ops->getarraywrite = NULL; } else { V->ops->dotnorm2 = VecDotNorm2_MPICUDA; V->ops->waxpy = VecWAXPY_SeqCUDA; V->ops->duplicate = VecDuplicate_MPICUDA; V->ops->dot = VecDot_MPICUDA; V->ops->mdot = VecMDot_MPICUDA; V->ops->tdot = VecTDot_MPICUDA; V->ops->norm = VecNorm_MPICUDA; V->ops->scale = VecScale_SeqCUDA; V->ops->copy = VecCopy_SeqCUDA; V->ops->set = VecSet_SeqCUDA; V->ops->swap = VecSwap_SeqCUDA; V->ops->axpy = VecAXPY_SeqCUDA; V->ops->axpby = VecAXPBY_SeqCUDA; V->ops->maxpy = VecMAXPY_SeqCUDA; V->ops->aypx = VecAYPX_SeqCUDA; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA; V->ops->pointwisemult = VecPointwiseMult_SeqCUDA; V->ops->setrandom = VecSetRandom_SeqCUDA; V->ops->placearray = VecPlaceArray_SeqCUDA; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_SeqCUDA; V->ops->dot_local = VecDot_SeqCUDA; V->ops->tdot_local = VecTDot_SeqCUDA; V->ops->norm_local = VecNorm_SeqCUDA; V->ops->mdot_local = VecMDot_SeqCUDA; V->ops->destroy = VecDestroy_MPICUDA; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA; V->ops->getlocalvector = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA; V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA; V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA; } PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[]) { PetscErrorCode ierr; Vec_CUDA *veccuda; PetscFunctionBegin; ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr); ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr); vv->ops->bindtocpu = 
VecBindToCPU_MPICUDA; /* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */ if (alloc && !array) { ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; } if (array) { if (!vv->spptr) { PetscReal pinned_memory_min; PetscBool flag; /* Cannot use PetscNew() here because spptr is void* */ ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)vv->spptr; veccuda->stream = 0; /* using default stream */ veccuda->GPUarray_allocated = 0; vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED; vv->minimum_bytes_pinned_memory = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); pinned_memory_min = vv->minimum_bytes_pinned_memory; ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr); if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } veccuda = (Vec_CUDA*)vv->spptr; veccuda->GPUarray = (PetscScalar*)array; } PetscFunctionReturn(0); }
2abaccf3eb8ebf5aeff52c3989e1506109924739.cu
/* This file contains routines for Parallel vector operations. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/ #include <petsc/private/cudavecimpl.h> /*MC VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise. Options Database Keys: . -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDestroy_MPICUDA(Vec v) { Vec_MPI *vecmpi = (Vec_MPI*)v->data; Vec_CUDA *veccuda; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; if (v->spptr) { veccuda = (Vec_CUDA*)v->spptr; if (veccuda->GPUarray_allocated) { err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err); veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err); } if (v->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr); ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); v->pinned_memory = PETSC_FALSE; } ierr = PetscFree(v->spptr);CHKERRQ(ierr); } ierr = VecDestroy_MPI(v);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z) { PetscReal sum,work = 0.0; PetscErrorCode ierr; PetscFunctionBegin; if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecNorm_SeqCUDA(xin,NORM_2,&work); work *= work; ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = PetscSqrtReal(sum); } else if (type == NORM_1) { /* Find the local part */ ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr); /* Find the global max */ ierr = 
MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { /* Find the local max */ ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { PetscReal temp[2]; ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr); temp[1] = temp[1]*temp[1]; ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); z[1] = PetscSqrtReal(z[1]); } PetscFunctionReturn(0); } PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z) { PetscScalar awork[128],*work = awork; PetscErrorCode ierr; PetscFunctionBegin; if (nv > 128) { ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr); } ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr); ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr); if (nv > 128) { ierr = PetscFree(work);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA Options Database Keys: . 
-vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v) { PetscErrorCode ierr; Vec_MPI *vw,*w = (Vec_MPI*)win->data; PetscScalar *array; PetscFunctionBegin; ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr); vw = (Vec_MPI*)(*v)->data; ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr); /* save local representation of the parallel vector (and scatter) if it exists */ if (w->localrep) { ierr = VecGetArray(*v,&array);CHKERRQ(ierr); ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr); ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr); ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr); vw->localupdate = w->localupdate; if (vw->localupdate) { ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr); } } /* New vector should inherit stashing property of parent */ (*v)->stash.donotstash = win->stash.donotstash; (*v)->stash.ignorenegidx = win->stash.ignorenegidx; /* change type_name appropriately */ ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr); (*v)->map->bs = PetscAbs(win->map->bs); (*v)->bstash.bs = win->bstash.bs; PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_MPICUDA(Vec 
s,Vec t,PetscScalar *dp,PetscScalar *nm) { PetscErrorCode ierr; PetscScalar work[2],sum[2]; PetscFunctionBegin; ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr); *dp = sum[0]; *nm = sum[1]; PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA(Vec vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr); ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; PetscFunctionReturn(0); } PetscErrorCode VecCreate_CUDA(Vec v) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr); if (size == 1) { ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr); } else { ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArray - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - array - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. If the user-provided array is NULL, then VecCUDAPlaceArray() can be used at a later stage to SET the array for storing the vector values. PETSc does NOT free the array when the vector is destroyed via VecDestroy(). 
The user should not free the array until the vector is destroyed. Level: intermediate .seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray() @*/ PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector"); ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = VecCreate(comm,vv);CHKERRQ(ierr); ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr); ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - cpuarray - the user provided CPU array to store the vector values - gpuarray - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: If both cpuarray and gpuarray are provided, the caller must ensure that the provided arrays have identical values. Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. PETSc does NOT free the provided arrays when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. 
Level: intermediate .seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(), VecCUDAAllocateCheckHost() @*/ PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr); if (cpuarray && gpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_BOTH; } else if (cpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_CPU; } else if (gpuarray) { (*vv)->offloadmask = PETSC_OFFLOAD_GPU; } else { (*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } PetscFunctionReturn(0); } PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin) { PetscErrorCode ierr; PetscFunctionBegin; V->boundtocpu = pin; if (pin) { ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr); V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */ V->ops->dotnorm2 = NULL; V->ops->waxpy = VecWAXPY_Seq; V->ops->dot = VecDot_MPI; V->ops->mdot = VecMDot_MPI; V->ops->tdot = VecTDot_MPI; V->ops->norm = VecNorm_MPI; V->ops->scale = VecScale_Seq; V->ops->copy = VecCopy_Seq; V->ops->set = VecSet_Seq; V->ops->swap = VecSwap_Seq; V->ops->axpy = VecAXPY_Seq; V->ops->axpby = VecAXPBY_Seq; V->ops->maxpy = VecMAXPY_Seq; V->ops->aypx = VecAYPX_Seq; V->ops->axpbypcz = VecAXPBYPCZ_Seq; V->ops->pointwisemult = VecPointwiseMult_Seq; V->ops->setrandom = VecSetRandom_Seq; V->ops->placearray = VecPlaceArray_Seq; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_Seq; V->ops->dot_local = VecDot_Seq; V->ops->tdot_local = VecTDot_Seq; V->ops->norm_local = 
VecNorm_Seq; V->ops->mdot_local = VecMDot_Seq; V->ops->pointwisedivide = VecPointwiseDivide_Seq; V->ops->getlocalvector = NULL; V->ops->restorelocalvector = NULL; V->ops->getlocalvectorread = NULL; V->ops->restorelocalvectorread = NULL; V->ops->getarraywrite = NULL; } else { V->ops->dotnorm2 = VecDotNorm2_MPICUDA; V->ops->waxpy = VecWAXPY_SeqCUDA; V->ops->duplicate = VecDuplicate_MPICUDA; V->ops->dot = VecDot_MPICUDA; V->ops->mdot = VecMDot_MPICUDA; V->ops->tdot = VecTDot_MPICUDA; V->ops->norm = VecNorm_MPICUDA; V->ops->scale = VecScale_SeqCUDA; V->ops->copy = VecCopy_SeqCUDA; V->ops->set = VecSet_SeqCUDA; V->ops->swap = VecSwap_SeqCUDA; V->ops->axpy = VecAXPY_SeqCUDA; V->ops->axpby = VecAXPBY_SeqCUDA; V->ops->maxpy = VecMAXPY_SeqCUDA; V->ops->aypx = VecAYPX_SeqCUDA; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA; V->ops->pointwisemult = VecPointwiseMult_SeqCUDA; V->ops->setrandom = VecSetRandom_SeqCUDA; V->ops->placearray = VecPlaceArray_SeqCUDA; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_SeqCUDA; V->ops->dot_local = VecDot_SeqCUDA; V->ops->tdot_local = VecTDot_SeqCUDA; V->ops->norm_local = VecNorm_SeqCUDA; V->ops->mdot_local = VecMDot_SeqCUDA; V->ops->destroy = VecDestroy_MPICUDA; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA; V->ops->getlocalvector = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA; V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA; V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA; } PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[]) { PetscErrorCode ierr; Vec_CUDA *veccuda; PetscFunctionBegin; ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr); ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr); vv->ops->bindtocpu = 
VecBindToCPU_MPICUDA; /* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */ if (alloc && !array) { ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; } if (array) { if (!vv->spptr) { PetscReal pinned_memory_min; PetscBool flag; /* Cannot use PetscNew() here because spptr is void* */ ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)vv->spptr; veccuda->stream = 0; /* using default stream */ veccuda->GPUarray_allocated = 0; vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED; vv->minimum_bytes_pinned_memory = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); pinned_memory_min = vv->minimum_bytes_pinned_memory; ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr); if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } veccuda = (Vec_CUDA*)vv->spptr; veccuda->GPUarray = (PetscScalar*)array; } PetscFunctionReturn(0); }
bbb26cd01c790c8f7fc4df3d17ad43e444686c55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2018 Kohei Nagasawa // Read LICENSE.md for license condition of this software // #include "mandelbulbRenderer.cuh" #include "cudaUtils.cuh" #ifdef ENABLE_DOUBLE_PRECISION #include "double3.h" #endif #include <cmath> #include <stdio.h> #include <time.h> #define FLOAT_MAX 3.402823466e+38F #define ANALYTICAL_DISTANCE_ESTIMATION using namespace CudaUtils; // Constants static constexpr float s_sphereRadiusSquared = 1.22f; static constexpr float s_mandelEvalThreshold = 2.f; static constexpr float s_rayAcceleration = 0.000002f; static constexpr float s_deltaPower = 2000000.0f; static constexpr float s_maxDepth = 1.5f; static constexpr float s_minDepth = 0.0002f; static constexpr int s_numBinaryIterations = 6; static constexpr int s_numIterationsOnHost = 20; // Tables for Morton curve static constexpr unsigned int s_mortonMasksHost[] = { 0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF, 0x0000FFFF }; __device__ const unsigned int s_mortonMasks[] = { 0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF, 0x0000FFFF }; // Texture buffers texture<float, 1, hipReadModeElementType> s_positionTexture; texture<float, 1, hipReadModeElementType> s_depthTexture; texture<float, 1, hipReadModeElementType> s_deltaDepthTexture; texture<float, 1, hipReadModeElementType> s_depthDiffTexture; texture<int, 1, hipReadModeElementType> s_iterationTexture; #define ENCODE_MORTON_CURVE(MASK) \ unsigned int x = xPos; \ unsigned int y = yPos; \ x = (x | (x << 8)) & MASK[3]; \ x = (x | (x << 4)) & MASK[2]; \ x = (x | (x << 2)) & MASK[1]; \ x = (x | (x << 1)) & MASK[0]; \ y = (y | (y << 8)) & MASK[3]; \ y = (y | (y << 4)) & MASK[2]; \ y = (y | (y << 2)) & MASK[1]; \ y = (y | (y << 1)) & MASK[0]; \ const unsigned int result = x | (y << 1); \ return result // Encode Morton curve on CPU unsigned int encodeMortonCurveHost(unsigned short xPos, unsigned short yPos) { ENCODE_MORTON_CURVE(s_mortonMasksHost); } 
// Evaluate Mandelbulb at the given position on host CPU template <int N> bool evalMandelbulbOnHost(const float3& pos, int numIterations, int& iterationLeft) { float3 hc = pos; float r; // Evaluate mandelbulb at the point with the given iteration count for (int i = 0; i < numIterations; i++) { if (hc.x == 0) { iterationLeft = numIterations - i; break; } r = sqrt(hc.x * hc.x + hc.y * hc.y + hc.z * hc.z); if (r > s_mandelEvalThreshold) { // r is diverged, which means we will never hit the surface. // Abort iteration. iterationLeft = numIterations - i; } if (hc.x != 0) { float phi = atan(hc.y / hc.x); float theta = acos(hc.z / r); r = pow(r, N); theta = N * theta; phi = N * phi; const float sinth = sin(theta) * r; hc.x = sinth * cos(phi); hc.y = sinth * sin(phi); hc.z = cos(theta) * r; } hc = hc + pos; } // We didn't diverged withint the iteration count. // Then this point is considered as surface of mandelbulb. iterationLeft = 0; return true; } // Perform ray marching from the center of view on CPU void MandelbulbRenderer::rayMarchOnHost(float initialDeltaDepth) { printf("----- Begin Ray Marching -----\n"); const float3& direction = m_cameraForward; float3 originalPosition = m_cameraPosition; { const float cameraDistanceSquared = dot(originalPosition, originalPosition); if (cameraDistanceSquared >= s_sphereRadiusSquared) { // Camera is outside of the radius. // Ray should start from sphere's surface of the radius originalPosition = originalPosition + direction * sqrt(cameraDistanceSquared - s_sphereRadiusSquared); } } float3 pos = originalPosition; float deltaDepth = initialDeltaDepth > 0 ? 
initialDeltaDepth : m_initialDeltaStep; // Get iteration count const int numIterations = 10; while (1) { // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb bool result; int iterationLeft; { // Evaluate Mandelbulb result = evalMandelbulbOnHost<8>(pos, numIterations, iterationLeft); } // Calculate the current depth float depth = dot(pos - originalPosition, direction); printf("%f,%d\n", depth, numIterations - iterationLeft); if (result) { // We got a hit! printf("----- Hit : End of Ray Marching -----\n"); return; } // Update depth and position pos = pos + direction * deltaDepth; } printf("----- No Hit : End of Ray Marching -----\n"); return; } namespace MandelbulbCudaKernel { // Get pixel index __device__ unsigned int getPixelIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } // Encode pixel coordenates into morton curve index __device__ unsigned int encodeMortonCurve(unsigned short xPos, unsigned short yPos) { ENCODE_MORTON_CURVE(s_mortonMasks); } // Decode morton curve index into pixel coordinates __device__ void decodeMortonCurve(unsigned int morton, unsigned short& xPos, unsigned short& yPos) { unsigned int x = morton & s_mortonMasks[0]; unsigned int y = (morton & (s_mortonMasks[0] << 1)) >> 1; x = (x | x >> 1) & s_mortonMasks[1]; x = (x | x >> 2) & s_mortonMasks[2]; x = (x | x >> 4) & s_mortonMasks[3]; x = (x | x >> 8) & s_mortonMasks[4]; y = (y | y >> 1) & s_mortonMasks[1]; y = (y | y >> 2) & s_mortonMasks[2]; y = (y | y >> 4) & s_mortonMasks[3]; y = (y | y >> 8) & s_mortonMasks[4]; xPos = x; yPos = y; } // Get index of pixel from its coordinate __device__ unsigned int getIndex(int x, int y, int width, bool mortonCurve) { if (mortonCurve) { return encodeMortonCurve((unsigned short)x, (unsigned short)y); } else { return x + y * width; } } // Get index of neighbor pixel __device__ unsigned int getNeighborIndex(unsigned int index, int xOffset, int yOffset, int width, bool mortonCurve) { if 
(mortonCurve) { unsigned short x, y; decodeMortonCurve(index, x, y); return encodeMortonCurve(x + (unsigned short)xOffset, y + (unsigned short)yOffset); } else { return index + xOffset + width * yOffset; } } // Set coordinates of each pixcel __global__ void setTexcoords(unsigned int numPixcels, int width, int height, bool mortonCurve, int2* texcoords) { const unsigned int id = getPixelIndex(); if (id < numPixcels) { int2& cord = texcoords[id]; if (mortonCurve) { unsigned short x, y; decodeMortonCurve(id, x, y); cord.x = x; cord.y = y; } else { const int widthIndex = id % width; const int heightIndex = (id - widthIndex) / width; cord.x = widthIndex; cord.y = heightIndex; } } } // Rotates a vector by angle around axis __device__ float3 rotate(const float3& vector, const float3& axis, const float angle) { float3 vectorOut; const float c = cos(angle); const float s = sin(angle); const float cosx = (1 - c) * axis.x; const float cosy = (1 - c) * axis.y; const float cosz = (1 - c) * axis.z; const float sinx = s * axis.x; const float siny = s * axis.y; const float sinz = s * axis.z; const float cosxy = cosx * axis.y; const float cosxz = cosx * axis.z; const float cosyz = cosy * axis.z; vectorOut.x = (c + cosx * axis.x) * vector.x + (cosxy - sinz) * vector.y + (cosxz + siny) * vector.z; vectorOut.y = (cosxy + sinz) * vector.x + (c + cosy * axis.y) * vector.y + (cosyz - sinx) * vector.z; vectorOut.z = (cosxz - siny) * vector.x + (cosyz + sinx) * vector.y + (c + cosz * axis.z) * vector.z; return vectorOut; } // Set ray marching directions for each pixel __global__ void setPixelDirection( const unsigned int numPixels, const int halfWidth, const int halfHeight, const int2* texcoords, const float3 forwardDir, const float3 upDir, const float3 sideDir, const float dagl, float3* pixelDirs) { const unsigned int id = getPixelIndex(); if (id < numPixels) { const int2& cord = texcoords[id]; const float xAngle = (cord.x - halfWidth) * dagl; const float yAngle = (cord.y - halfHeight) * 
dagl; float3& pixelDir = pixelDirs[id]; pixelDir = rotate(forwardDir, upDir, xAngle); pixelDir = rotate(pixelDir, sideDir, yAngle); pixelDir = normalize(pixelDir); } } // Set ray marching directions for each pixel __global__ void setPixelDirectionLow( const unsigned int numPixels, const int halfWidth, const int halfHeight, const int2* texcoords, const float3 forwardDir, const float3 upDir, const float3 sideDir, const float dagl, float3* pixelDirs) { const unsigned int id = getPixelIndex(); if (id < numPixels) { const int2& cord = texcoords[id]; float xAngle; if (cord.y % 2 == 0) { xAngle = (2 * cord.x - halfWidth) * dagl; } else { xAngle = (2 * cord.x + 1 - halfWidth) * dagl; } const float yAngle = (cord.y - halfHeight) * dagl; float3& pixelDir = pixelDirs[id]; pixelDir = rotate(forwardDir, upDir, xAngle); pixelDir = rotate(pixelDir, sideDir, yAngle); pixelDir = normalize(pixelDir); } } // Actual evaluation of mandelbulb at given position // Depth is used to determined the number of iterations // Returns 1 if we hit mandelbulb or negative value if we didn't // The smaller the returned value is, the sooner iteration ended // which means the further the point is from the surface template <int N> __device__ bool evalMandelbulb(const float3& pos, const int numIterations, int& iterationLeft, float& potential, float& dr) { float3 hc = pos; float r = 0.0f; dr = 1.0f; // Evaluate mandelbulb at the point with the given iteration count for (int i = 0; i < numIterations; i++) { if (hc.x == 0) { iterationLeft = numIterations - i; potential = log(r) / pow((float)N, float(i)); dr = 0.5f * log(r) * r / dr; return false; } r = sqrt(dot(hc, hc)); if (r > s_mandelEvalThreshold) { // r is diverged, which means we will never hit the surface. // Abort iteration. 
iterationLeft = numIterations - i; potential = log(r) / pow((float)N, float(i)); dr = 0.5f * log(r) * r / dr; return false; } if (hc.x != 0) { float phi = atan(hc.y / hc.x); float theta = acos(hc.z / r); r = pow(r, N); theta = N * theta; phi = N * phi; const float sinth = sin(theta) * r; hc.x = sinth * cos(phi); hc.y = sinth * sin(phi); hc.z = cos(theta) * r; } hc += pos; dr = pow(r, N - 1) * (float)N * dr + 1.0f; } // We didn't diverged withint the iteration count. // Then this point is considered as surface of mandelbulb. iterationLeft = 0; potential = 0; return true; } template <int N> __device__ float potential(int numIteration, const float3& pos) { float3 hc = pos; float r; for (int i = 0; i < numIteration; ++i) { r = sqrt(dot(hc, hc)); if (r > s_mandelEvalThreshold) { return log(r) / pow((float)N, float(i)); } float theta = acos(hc.z / r); float phi = atan(hc.y / hc.x); float zr = pow(r, (float)N); theta = theta*(float)N; phi = phi*(float)N; const float sinth = sin(theta) * zr; hc.x = sinth * cos(phi); hc.y = sinth * sin(phi); hc.z = cos(theta) * zr; hc = pos + hc; } return 0; } // Calculate the number of iterations used for Mandelbulb evaluation __device__ int getNumIterations(float depth, float iterationAccelerator, int minimumIterations) { return (int)(-log(pow(depth, iterationAccelerator)) / log(50.f)) + minimumIterations; } // Evaluate Mandelbulb by ray marching template <int N> __device__ bool rayMarchWithAcceleration( float3 pos, const float3& direction, float iterationAccelerator, int minimumIterations, int fixedIteration, bool distanceEstimation, int& pixelIteration, int& numSteps, float& depth, float& deltaDepth) { float rayAccelerator = 0; float pot, dr; int currentIteration = -1; int iterationLeft; bool result; numSteps = 0; while (1) { ++numSteps; // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb { currentIteration = fixedIteration > 0 ? 
fixedIteration : getNumIterations(depth, iterationAccelerator, minimumIterations); // Evaluate Mandelbulb result = evalMandelbulb<N>(pos, currentIteration, iterationLeft, pot, dr); // Update iteration count of this pixel pixelIteration = currentIteration; // [todo] Review and validate this code //if (!result && iterationLeft > 1) //{ // // Iteration count is too big. Make it smaller // minimumIterations = currentIteration - iterationLeft + 1; //} } if (result) { // We got a hit! return true; } // Update depth and position float delta = deltaDepth; float acceleration = s_rayAcceleration; if (distanceEstimation) { #ifndef ANALYTICAL_DISTANCE_ESTIMATION // Approximate derivative by gradient const float eps = 0.01f * deltaDepth; float3 posx = pos; posx.x += eps; float3 posy = pos; posy.y += eps; float3 posz = pos; posz.z += eps; float3 grad; grad.x = potential<N>(currentIteration, posx); grad.y = potential<N>(currentIteration, posy); grad.z = potential<N>(currentIteration, posz); grad = (grad - pot) / eps; float newDelta = max(delta, (0.5f / exp(pot))*sinh(pot) / sqrt(dot(grad, grad))); #else // Analytical solution float newDelta = max(delta, dr); #endif acceleration *= newDelta / delta; delta = newDelta; } depth += delta; pos = pos + direction * delta; // Accelerate the ray gradually while it gets far from the camera rayAccelerator += acceleration; deltaDepth *= pow(s_deltaPower, rayAccelerator + iterationLeft * acceleration); // [todo] Investigate this calculation } return false; } // Perform ray marching against mandelbulb // and store depth to mandelbulb and delta depth of ray marching for each pixel template <int N> __global__ void raymarchMandelbulb_kernel( const unsigned int numPixels, const float3* pixelDirs, const float3 cameraPosition, float deltaDepth, float iterationAccelerator, int minimumIterations, bool distanceEstimation, int* pixelIterations, int* numRayMarchSteps, float* pixelDepths, float* pixelDeltaDepths) { const unsigned int id = getPixelIndex(); 
if (id >= numPixels) { return; } // Reset iteration count of mandelbulb evaluation pixelIterations[id] = 0; float3 pos; const float3& direction = pixelDirs[id]; // First, check if we need to evaluate mandelbulb from the first place bool shouldEval = false; { const float cameraDistanceSquared = dot(cameraPosition, cameraPosition); const float projection = dot(cameraPosition, direction); if (projection > 0) { // Camera is facing the opposite way of the origin // We don't need to evaluate at all if the camera is outside of the radius if (cameraDistanceSquared < s_sphereRadiusSquared) { // If the camera is inside the radius, ray should start from there pos = cameraPosition + deltaDepth * direction; shouldEval = true; } } else { // Camera is facing to mandelbulb // Now check if the direction of ray can possibly hit mandelbulb const float dist = cameraDistanceSquared - projection * projection; if (dist < s_sphereRadiusSquared) { // Ray can hit mandelbulb // Now determine the start position of the ray if (cameraDistanceSquared < s_sphereRadiusSquared) { // If the camera is inside the radius, ray should start from there pos = cameraPosition + deltaDepth * direction; } else { // Camera is outside of the radius. // Ray should start from sphere's surface of the radius float d1 = -projection; float d2 = sqrt(s_sphereRadiusSquared - dist); pos = cameraPosition + direction * (d1 - d2 + deltaDepth); } shouldEval = true; } } } // Secondly, evaluate mandelbulb if needed if (shouldEval) { // Calculate the current depth float depth = dot(pos - cameraPosition, direction); // Perform ray marching bool hit = rayMarchWithAcceleration<N>( pos, direction, iterationAccelerator, minimumIterations, -1, distanceEstimation, pixelIterations[id], numRayMarchSteps[id], depth, deltaDepth); if (hit) { // We hit mandelbulb. 
// Store depth and delta depth of the hit pixelDepths[id] = depth; pixelDeltaDepths[id] = deltaDepth; return; } } // No hit pixelDepths[id] = FLOAT_MAX; pixelDeltaDepths[id] = 1.0f; pixelIterations[id] = 0; return; } // Perform bisection search to get surface position with higher precision template <int N> __global__ void binaryPartitionSearch( const unsigned int numPixels, const float3* pixelDirs, const float* pixelDeltaDepths, const float3 cameraPosition, const int* pixelIterations, float3* pixelPositions, float* pixelDepths) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } float depth = pixelDepths[id]; if (depth != FLOAT_MAX) { // We have hit mandelbulb at this pixel const float3& dir = pixelDirs[id]; const float deltaDepth = pixelDeltaDepths[id]; float currentDelta = deltaDepth; // Move backward one step float3 pos = cameraPosition + dir * (depth - deltaDepth); // Perform bi-section search for (int i = 0; i < s_numBinaryIterations; i++) { // Move half way currentDelta *= 0.5f; pos = pos + dir * currentDelta; depth += currentDelta; int numIterations = pixelIterations[id]; int iterationLeft; float pot, grad; if (evalMandelbulb<N>(pos, numIterations, iterationLeft, pot, grad)) { // We got a hit // Revert the last step and test again with smaller delta pos = pos - dir * currentDelta; depth -= currentDelta; } } // Store the result pixelDepths[id] = depth + currentDelta; pixelPositions[id] = pos + dir * currentDelta; } else { // There was no hit // Just set invalid position pixelPositions[id] = { FLOAT_MAX, FLOAT_MAX, FLOAT_MAX }; } } // Drill down mandelbulb's surface by changing iteration count adaptively based on smoothness of the surface // This kernel should be followed by compareDepthDiffs_kernel template <int N> __global__ void adaptiveIteration_kernel( const unsigned int numPixels, const float3* pixelDirs, const float3 cameraPosition, int minimumIterations, float iterationToDeltaStepRate, bool distanceEstimation, int* 
pixelIterations, float* pixelDepths, float* pixelDepthsTmp, float* pixelDeltaDepths, float* depthDiffs) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } if (pixelIterations[id] <= 0) { // This pixel is already done. Abort further drilling. return; } // Increment iteration count by one int currentIteration = pixelIterations[id] + 1; const float3& dir = pixelDirs[id]; float deltaDepth = pixelDeltaDepths[id] * iterationToDeltaStepRate; float depth = pixelDepths[id]; float3 pos = cameraPosition + dir * depth; bool hit = false; // Perform ray marching with the new iteration count but without ray acceleration while (1) { // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb int iterationLeft; float pot, dr; bool result = evalMandelbulb<N>(pos, currentIteration, iterationLeft, pot, dr); if (result) { // We hit the surface hit = true; break; } // [todo] Review and validate this code // //else if (iterationLeft > 1) //{ // // We didn't hit the surface and the iteration count is too big now. // // This means the ray is now moving aways from the surface. // // Reduce the iteration count and perform ray march with acceleration again. 
// // Calculate new iteration count and update delta depth // int numIterations = max(currentIteration - iterationLeft + 1, minimumIterations); // deltaDepth *= float(iterationLeft - 1) / iterationToDeltaStepRate; // // Perform ray marching // int dummy; // hit = rayMarchWithAcceleration<N>( // pos, // dir, // 0.f, // 0, // numIterations, // distanceEstimation, // dummy, // depth, // deltaDepth // ); // if (hit) // { // // We hit a new surface // // Continue the loop from there with new parameters // pos = cameraPosition + dir * depth; // pixelDepths[id] = depth; // pixelDeltaDepths[id] = deltaDepth; // pixelIterations[id] = numIterations; // currentIteration = numIterations + 1; // deltaDepth *= iterationToDeltaStepRate; // hit = false; // isFirstStep = true; // //continue; // } // // We didn't hit anything. // // Just break the loop to make this pixel invalid. // //break; //} // Update depth and position and evaluate mandelbulb again float delta = deltaDepth; if (distanceEstimation) { #ifndef ANALYTICAL_DISTANCE_ESTIMATION // Approximate derivative by gradient const float eps = 0.01f * deltaDepth; float3 posx = pos; posx.x += eps; float3 posy = pos; posy.y += eps; float3 posz = pos; posz.z += eps; float3 grad; grad.x = potential<N>(currentIteration, posx); grad.y = potential<N>(currentIteration, posy); grad.z = potential<N>(currentIteration, posz); grad = (grad - pot) / eps; float newDelta = max(delta, (0.5f / exp(pot))*sinh(pot) / sqrt(dot(grad, grad))); #else // Analytical solution float newDelta = max(delta, dr); #endif delta = newDelta; } depth += delta; pos = pos + dir * delta; } if (hit) { // When we hit a new surface with incremented iteration count, set depth difference from the original and temporary depth depthDiffs[id] = (depth - pixelDepths[id]) / depth; pixelDepthsTmp[id] = depth; } else { // We didn't hit any surface with the new iteration // Just set invalid values pixelDepths[id] = FLOAT_MAX; depthDiffs[id] = FLOAT_MAX; pixelIterations[id] = 0; } 
} // Compare depth difference of nearby pixels and calculate screen space laplacian of it to determine // if the surface is still smooth enough to drill down further __global__ void compareDepthDiffs_kernel( const unsigned int numPixels, const int width, const int height, const float laplacianThreshold, const float* pixelDepthDiffs, const float* pixelDepthsTmp, const int2* texcoords, float iterationToDeltaStepRate, bool mortonCurve, bool upsampling, int* pixelIterations, float* pixelDepths, float* pixelDeltaDepths) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const float depth = pixelDepths[id]; if (depth == FLOAT_MAX) { return; } const float diff = tex1Dfetch(s_depthDiffTexture, id); const float depthPlusDiff = depth + diff * pixelDepthsTmp[id]; const int2& cord = texcoords[id]; const int x = cord.x; const int y = cord.y; // Threshold of difference of depth between two pixels to calculate second derivative of depth diff // [todo] investigate if we need this condiyion //const float threshold = FLOAT_MAX; //const float invDepth = 1.f / depthPlusDiff; // Gradient of depth difference at neighbor pixels float gradDiffX1, gradDiffX2; float gradDiffY1, gradDiffY2; const int neighborOffsetX = upsampling ? 1 : 2; const int neighborOffsetY = 2; // [todo] Laplacian should be calculated only based on neighbors with the same iteration count as this pixel. // We are not checking iteration count of neighbors now but we should. 
unsigned int neighbor; float neighborDiff; // Left pixel if (x > 1) { neighbor = getNeighborIndex(id, -neighborOffsetX, 0, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffX1 = diff - neighborDiff; } //else //{ // gradDiffX1 = 0; //} } else { gradDiffX1 = 0; } // Right pixel if (x < width - neighborOffsetX) { neighbor = getNeighborIndex(id, neighborOffsetX, 0, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffX2 = neighborDiff - diff; } //else //{ // gradDiffX2 = 0; //} } else { gradDiffX2 = 0; } // Up pixel if (y > 1) { neighbor = getNeighborIndex(id, 0, -neighborOffsetY, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffY1 = diff - neighborDiff; } //else //{ // gradDiffY1 = 0; //} } else { gradDiffY1 = 0; } // Down pixel if (y < height - neighborOffsetY) { neighbor = getNeighborIndex(id, 0, neighborOffsetY, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffY2 = neighborDiff - diff; } //else //{ // gradDiffY2 = 0; //} } else { gradDiffY2 = 0; } // Calculate laplacian of depth difference const float laplacian = (gradDiffX2 - gradDiffX1) + (gradDiffY2 - gradDiffY1); if (fabs(laplacian) < laplacianThreshold) { // If laplacian is smaller than the threshold, it means the surface is smooth enough to drill down one more iteration pixelIterations[id] += 1; pixelDepths[id] = depthPlusDiff; pixelDeltaDepths[id] *= iterationToDeltaStepRate; } else { // Otherwise, negate the iteration count to indicate we cannot drill down this pixel any further pixelIterations[id] = -pixelIterations[id]; } } // Finalize 
data after adaptive drilling down __global__ void finalizeIteration_kernel( const unsigned int numPixels, //const int width, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } // Set negative iteration count back to the original positive value if (pixelIterations[id] < 0) { pixelIterations[id] = -pixelIterations[id]; } // Take average of neaby iteration counts // [todo] Investigate if we really need this process. // It seems it could just increase artifacts. // //const int iteration = pixelIterations[id]; //int count = 1; //int sum = iteration; //for (int i = -2; i <= 2; ++i) //{ // for (int j = -2; j <= 2; ++j) // { // //int neighbor = id + i + width * j; // const int neighbor = getNeighborIndex(id, i, j, width); // const int neighborIteration = tex1Dfetch(s_iterationTexture, neighbor); // if (abs(iteration - neighborIteration) < 3) // { // ++count; // sum += neighborIteration; // } // } //} //pixelIterations[id] = sum / count; } // Copy data from low resolution buffer to high resolution buffer __global__ void copyFromLowToHigh( const unsigned int numPixels, int width, bool mortonCurve, const int2* texcoordsLow, const float* pixelDepthsLow, const float* pixelDeltaDepthsLow, const int* pixelIterationsLow, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } // Calculate pixel index of high resolution buffers const int2& texcoord = texcoordsLow[id]; const int x = texcoord.y % 2 == 0 ? 
texcoord.x * 2 : texcoord.x * 2 + 1; unsigned int highResId = getIndex(x, texcoord.y, width, mortonCurve); pixelDepths[highResId] = pixelDepthsLow[id]; pixelDeltaDepths[highResId] = pixelDeltaDepthsLow[id]; pixelIterations[highResId] = pixelIterations[id]; } __device__ void getValuesFromClosestNeighbor( const unsigned int neighbor, float& closestDepth, float& closestDeltaDepth, int& closestIteration, int& maxIteration ) { float depth = tex1Dfetch(s_depthTexture, neighbor); int iteration = tex1Dfetch(s_iterationTexture, neighbor); if (depth < closestDepth) { closestDepth = depth; closestIteration = iteration; closestDeltaDepth = tex1Dfetch(s_deltaDepthTexture, neighbor); } if (iteration > maxIteration) { maxIteration = iteration; } } template <int N> __global__ void upsample( const unsigned int numPixels, const float3& cameraPosition, const int2* texcoordsLow, int width, int height, int minimumIterations, float iterationAccelerator, bool mortonCurve, const float3* pixelDirs, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& texcoord = texcoordsLow[id]; const int x = texcoord.x; const int y = texcoord.y; float closestDepth = FLOAT_MAX; float closestDeltaDepth = 0; int closestIteration = 0; int maxIteration = 0; { const int halfWidth = width / 2; unsigned int neighbor; const int oddOffset = y % 2 == 0 ? 
0 : 1; // Left pixel if(oddOffset || x > 0) { neighbor = getNeighborIndex(id, -oddOffset, 0, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Right pixel if(!oddOffset || x + 1 < halfWidth) { neighbor = getNeighborIndex(id, 1-oddOffset, 0, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Up pixel if(y > 0) { neighbor = getNeighborIndex(id, 0, -1, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Down pixel if (y + 1 < height) { neighbor = getNeighborIndex(id, 0, 1, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } const unsigned int highResId = getIndex(y % 2 == 0 ? 2 * x + 1 : 2 * x, y, width, mortonCurve); const float3& dir = pixelDirs[highResId]; float3 pos = cameraPosition + dir * closestDepth; int iterationLeft; float potential, dr; int res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); if (res > 0) { while (res > 0) { closestDepth -= closestDeltaDepth; pos -= dir * closestDeltaDepth; res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); } } else { closestDepth = closestDepth + closestDeltaDepth; pos += dir * closestDeltaDepth; int pixelIteration; rayMarchWithAcceleration<N>( pos, dir, iterationAccelerator, minimumIterations, -1, false, pixelIteration, closestIteration, closestDepth, closestDeltaDepth); } pixelDepths[highResId] = closestDepth; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } // Upsample template <int N> __global__ void upsamplePhase1( const unsigned int numPixels, const float3& cameraPosition, int widthLow, int heightLow, int width, int minimumIterations, float iterationAccelerator, bool mortonCurve, const 
int2* texcoordsLow, const float* pixelDepthsLow, const float* pixelDeltaDepthsLow, const int* pixelIterationsLow, const float3* pixelDirs, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& texcoord = texcoordsLow[id]; const int baseX = texcoord.x; const int baseY = texcoord.y; float closestDepth = FLOAT_MAX; float closestDeltaDepth = 0; int closestIteration = 0; int maxIteration = 0; { unsigned int neighbor; for (int i = 0; i <= 1; ++i) { if (baseX + i <= widthLow) { continue; } for (int j = 0; j <= 1; ++j) { if (baseY + j <= heightLow) { continue; } neighbor = getNeighborIndex(id, i, j, widthLow, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } } const int x = 2 * baseX + 1; const int y = 2 * baseY + 1; const unsigned int highResId = getIndex(x, y, width, mortonCurve); const float3& dir = pixelDirs[highResId]; float3 pos = cameraPosition + dir * closestDepth; int iterationLeft; float potential, dr; int res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); if (res > 0) { while (res > 0) { closestDepth -= closestDeltaDepth; pos -= dir * closestDeltaDepth; res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); } } else { closestDepth = closestDepth + closestDeltaDepth; pos += dir * closestDeltaDepth; int pixelIteration; rayMarchWithAcceleration<N>( pos, dir, iterationAccelerator, minimumIterations, -1, false, pixelIteration, closestIteration, closestDepth, closestDeltaDepth); } pixelDepths[highResId] = closestDepth; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } template <int N> __global__ void upsamplePhase2( const unsigned int numPixels, int width, int height, int minimumIterations, float iterationAccelerator, bool mortonCurve, const int2* texcoordsLow, const float3* pixelDirs, float* 
pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& texcoord = texcoordsLow[id]; const int baseX = texcoord.x; const int baseY = texcoord.y; const int x = (baseY % 2 == 0) ? (2 * baseX + 1) : (2 * baseX); const int y = 2 * baseY; const unsigned int highResId = getIndex(x, y, width, mortonCurve); float closestDepth = FLOAT_MAX; float closestDeltaDepth = 0; int closestIteration = 0; int maxIteration = 0; { unsigned int neighbor; // Left pixel if (x > 0) { neighbor = getNeighborIndex(highResId, -1, 0, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Right pixel if (x + 1 < width) { neighbor = getNeighborIndex(highResId, 1, 0, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Up pixel if (y > 0) { neighbor = getNeighborIndex(highResId, 0, -1, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Down pixel if (y + 1 < height) { neighbor = getNeighborIndex(highResId, 0, 1, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } pixelDepths[highResId] = FLOAT_MAX; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } template <int N> __global__ void upsamplePhase3( const unsigned int numPixels, int width, int height, int minimumIterations, float iterationAccelerator, bool mortonCurve, const int2* texcoordsLow, const float3* pixelDirs, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& texcoord = texcoordsLow[id]; const int baseX = texcoord.x; const int baseY = texcoord.y; const int x = 2 * baseX; const int y = (baseX % 2 == 
0) ? (2 * baseY + 1) : (2 * baseY); const unsigned int highResId = getIndex(x, y, width, mortonCurve); float closestDepth = FLOAT_MAX; float closestDeltaDepth = 0; int closestIteration = 0; int maxIteration = 0; { unsigned int neighbor; // Left pixel if (x > 0) { neighbor = getNeighborIndex(highResId, -1, 0, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Right pixel if (x + 1 < width) { neighbor = getNeighborIndex(highResId, 1, 0, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Up pixel if (y > 0) { neighbor = getNeighborIndex(highResId, 0, -1, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Down pixel if (y + 1 < height) { neighbor = getNeighborIndex(highResId, 0, 1, width, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } pixelDepths[highResId] = FLOAT_MAX; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } // Evaluate normal of pixels by simplified screen space calculation __global__ void samplePseudoScreenSpaceNormals_kernel( const unsigned int numPixels, const int width, const int height, const int2* texcoords, const float3 cameraPos, bool mortonCurve, float3* pixelNormals) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const unsigned int id3 = id * 3; // Get surface position float3 pos; pos.x = tex1Dfetch(s_positionTexture, id3); pos.y = tex1Dfetch(s_positionTexture, id3 + 1); pos.z = tex1Dfetch(s_positionTexture, id3 + 2); if (pos.x == FLOAT_MAX) return; // Get pixel coordinates const int2& texcoord = texcoords[id]; const int x = texcoord.x; const int y = texcoord.y; float p1, p2; unsigned int neighborId; float3 nor = { 0.0f, 0.0f, 0.0f }; // Right pixel if (x > 0) { 
neighborId = 3 * getNeighborIndex(id, -1, 0, width, mortonCurve); p1 = pos.x - tex1Dfetch(s_positionTexture, neighborId); p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2); nor.x += p2; nor.z += p1; } // Left pixel if (x + 1 < width) { neighborId = 3 * getNeighborIndex(id, 1, 0, width, mortonCurve); p1 = pos.x - tex1Dfetch(s_positionTexture, neighborId); p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2); nor.x -= p2; nor.z -= p1; } // Up pixel if (y > 0) { neighborId = 3 * getNeighborIndex(id, 0, -1, width, mortonCurve); p1 = pos.y - tex1Dfetch(s_positionTexture, neighborId + 1); p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2); nor.y += p2; nor.z += p1; } // Down pixel if (y + 1 < height) { neighborId = 3 * getNeighborIndex(id, 0, 1, width, mortonCurve); p1 = pos.y - tex1Dfetch(s_positionTexture, neighborId + 1); p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2); nor.y -= p2; nor.z -= p1; } pixelNormals[id] = normalize(nor); } // Evaluate normal of pixels by screen space depth __global__ void sampleScreenSpaceNormals_kernel( const unsigned int numPixels, const int width, const int height, const int2* texcoords, const float3 cameraPos, bool mortonCurve, float3* pixelNormals) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const unsigned int id3 = id * 3; // Get surface position float3 pos; pos.x = tex1Dfetch(s_positionTexture, id3); pos.y = tex1Dfetch(s_positionTexture, id3 + 1); pos.z = tex1Dfetch(s_positionTexture, id3 + 2); if (pos.x == FLOAT_MAX) return; // Get pixel coordinates const int2& texcoord = texcoords[id]; const int x = texcoord.x; const int y = texcoord.y; unsigned int neighborId; float3 neighborPos1, neighborPos2; // Right pixel if (x != 0) { neighborId = 3 * getNeighborIndex(id, -1, 0, width, mortonCurve); neighborPos1.x = tex1Dfetch(s_positionTexture, neighborId); neighborPos1.y = tex1Dfetch(s_positionTexture, neighborId + 1); neighborPos1.z = tex1Dfetch(s_positionTexture, neighborId 
+ 2); } else { neighborPos1 = pos; } // Left pixel if (x + 1 != width) { neighborId = 3 * getNeighborIndex(id, 1, 0, width, mortonCurve); neighborPos2.x = tex1Dfetch(s_positionTexture, neighborId); neighborPos2.y = tex1Dfetch(s_positionTexture, neighborId + 1); neighborPos2.z = tex1Dfetch(s_positionTexture, neighborId + 2); } else { neighborPos2 = pos; } const float3 dx = neighborPos2 - neighborPos1; // Up pixel if (y != 0) { neighborId = 3 * getNeighborIndex(id, 0, -1, width, mortonCurve); neighborPos1.x = tex1Dfetch(s_positionTexture, neighborId); neighborPos1.y = tex1Dfetch(s_positionTexture, neighborId + 1); neighborPos1.z = tex1Dfetch(s_positionTexture, neighborId + 2); } else { neighborPos1 = pos; } // Down pixel if (y + 1 != height) { neighborId = 3 * getNeighborIndex(id, 0, 1, width, mortonCurve); neighborPos2.x = tex1Dfetch(s_positionTexture, neighborId); neighborPos2.y = tex1Dfetch(s_positionTexture, neighborId + 1); neighborPos2.z = tex1Dfetch(s_positionTexture, neighborId + 2); } else { neighborPos2 = pos; } const float3 dy = neighborPos2 - neighborPos1; pixelNormals[id] = normalize(cross(dx, dy)); } // Apply SSAO effect __global__ void applySSAO_kernel( const unsigned int numPixels, const int width, const int height, const int2* texcoords, bool mortonCurve, float3* pixelColors) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& cord = texcoords[id]; const int x = cord.x; const int y = cord.y; float depth = tex1Dfetch(s_depthTexture, id); unsigned int shieldCount = 1; unsigned int neighborId; float neighborDepth; for (int i = -3; i <= 3; ++i) { if ((x == 0 && i < 0) || (x + 1 == width && i > 0)) { continue; } for (int j = -3; j <= 3; ++j) { if (i == 0 && j == 0) { continue; } if (i * i + j * j > 9) { continue; } if ((y == 0 && j < 0) || (y + 1 == height && j > 0)) { continue; } neighborId = getNeighborIndex(id, i, j, width, mortonCurve); neighborDepth = tex1Dfetch(s_depthTexture, neighborId); if (depth > 
neighborDepth) { ++shieldCount; } } } float shield = 1.f / (float)shieldCount * 9.f + 0.3f; // Clamp shield if (shield > 1.0f) { return; } // Modify the color pixelColors[id].x *= shield; pixelColors[id].y *= shield; pixelColors[id].z *= shield; } template <int N> __global__ void castShadow( const unsigned int numPixels, const unsigned int minimumIterations, const float3* pixelPositions, const float* pixelDepths, const float3* lightPositions, const unsigned int numLights, float3* pixelColors) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } if (id < numPixels) { const float3& pixelPos = pixelPositions[id]; const float depth = pixelDepths[id]; float deltaDepth = 0.0005f * depth; bool lit = false; for (int l = 0; l < numLights; ++l) { const float3 dir = lightPositions[l] - pixelPos; float3 pos = pixelPos + dir * deltaDepth; float count = 0; int i; for (i = 0; i < 25; i++) { if (dot(pos, pos) > s_sphereRadiusSquared) { lit = true; break; } const int currentIteration = getNumIterations(depth, 0, minimumIterations); int iterationLeft; float residual, potential; if (evalMandelbulb<N>(pos, currentIteration, iterationLeft, potential, residual)) { break; } pos = pos + dir * deltaDepth; count += 0.0002f; deltaDepth *= pow(1000.0f, count); } if (i == 25) { lit = true; break; } } if (!lit) { pixelColors[id] *= 0.5f; } } } // Calculate color of pixels based on position, normal and lights __global__ void setColorFromPos( const unsigned int numPixels, const float3* pixelPositions, const float* pixelDepths, const float3* pixelNormals, const float3 cameraPosition, const float3* lightPositions, const unsigned int numLights, bool useNormal, bool colorfulMode, float3* pixelColors) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } float3 col; const float3& pos = pixelPositions[id]; if (pixelDepths[id] != FLOAT_MAX) { const float gain = 0.2f; const float ampl = 1.2f; col = (pos + ampl) * gain; if (useNormal) { const float3& normal = 
pixelNormals[id]; const float div = dot(normal, pos); const float divSquared = div * div; float diffuse = 0; float specular = 0; for (int i = 0; i < numLights; ++i) { const float3& lightPosition = lightPositions[i]; diffuse += 0.5f * max(dot(normal, normalize(lightPosition - pos)), 0.0f); specular += pow(max(dot(normal, normalize(cameraPosition - lightPosition)), 0.0f), 8.0f); } diffuse *= 2.f; specular *= 0.5f; const float ambient = 1.0f; const float ambientAndDiffuse = ambient + diffuse; if (colorfulMode) { float col1 = 1.0f, col2 = 1.0f; if (div > 0.0f) { col2 += divSquared; } else { col1 += divSquared; } const float col3 = 0.003f / (divSquared * divSquared + 0.01f); col.x = col.x * ambientAndDiffuse * col1 + specular; col.y = col.y * ambientAndDiffuse + specular + col3; col.z = col.z * ambientAndDiffuse * col2 + specular; } else { col.x = col.x * ambientAndDiffuse + specular; col.y = col.y * ambientAndDiffuse + specular; col.z = col.z * ambientAndDiffuse + specular; } } // Visualize normal as color //if (useNormal) //{ // const float3& normal = pixelNormals[id]; // col = normal; //} } else { col.x = 0.0f; col.y = 0.0f; col.z = 0.0f; } pixelColors[id] = col; } // Convert floating point [0:1] color into 256 color __global__ void colFloatToByte( const unsigned int numPixels, const float3* pixelColorsFloat, int width, bool mortonCurve, unsigned char* pixelColorsUChar) { const unsigned int id = getPixelIndex(); if (id < numPixels) { const float3 col = pixelColorsFloat[id]; const float r = col.x > 1.f ? 1.0 : (col.x < 0.f ? 0.f : col.x); const float g = col.y > 1.f ? 1.0 : (col.y < 0.f ? 0.f : col.y); const float b = col.z > 1.f ? 1.0 : (col.z < 0.f ? 
0.f : col.z); unsigned int outId; if (mortonCurve) { unsigned short x, y; decodeMortonCurve(id, x, y); outId = x + width * y; } else { outId = id; } unsigned char* colUChar = &pixelColorsUChar[outId * 3]; colUChar[0] = (unsigned char)(255 * r); colUChar[1] = (unsigned char)(255 * g); colUChar[2] = (unsigned char)(255 * b); } } #ifdef ENABLE_DOUBLE_PRECISION #include "mandelbulbRendererDouble.cu" #endif } // namespace MandelbulbCudaKernel // Costructor MandelbulbRenderer::MandelbulbRenderer(unsigned int width, unsigned int height) : m_width(width) , m_height(height) , m_numPixels(width * height) { hipSetDevice(0); hipDeviceSynchronize(); computeGridSize(m_numPixels, 256, m_numBlocks, m_numThreads); computeGridSize(m_numPixels / 2, 256, m_numBlocksForLow, m_numThreadsForLow); allocateMemory(); hipLaunchKernelGGL(( MandelbulbCudaKernel::setTexcoords), dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, (int)m_width, (int)m_height, m_mortonCurve, m_texcoords); } // Destructor MandelbulbRenderer::~MandelbulbRenderer() { freeMemory(); freeAdaptiveIterationsMemory(); freeUpsamplingMemory(); } // Update camera information void MandelbulbRenderer::updateCamera(const float3& pos, const float3& forward, const float3& up, const float3& side) { m_cameraPosition = pos; m_cameraForward = forward; m_cameraUp = up; m_cameraSide = side; setDirty(); } // Set pixel angle void MandelbulbRenderer::setPixelAngle(float angle) { if (angle > 0) { m_pixelAngle = angle; } } // Calculate mandelbulb and set colors to pixel buffer void MandelbulbRenderer::createMandelbulb(unsigned char* pixcelColorsHost) { using namespace MandelbulbCudaKernel; clock_t timer; if (m_profile) { timer = clock(); } // Set initial iteration count { startTimer("Initialization"); // Make sure that iteration counts is bigger than minimum counts if (m_initialIterations < m_minimumIterations) { m_initialIterations = m_minimumIterations; } // Run mandelbulb at the camera position on device once and determine the 
initial iteration counts int iterationLeft; evalMandelbulbOnHost<8>(m_cameraPosition, s_numIterationsOnHost, iterationLeft); if (iterationLeft > 0) { int numIteration = s_numIterationsOnHost - iterationLeft + 1; if (numIteration > m_initialIterations) { m_initialIterations = numIteration; } } endTimer(); } { startTimer("Set Pixel Direction"); // Set direction of rays hipLaunchKernelGGL(( setPixelDirection) , dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, (int)m_width / 2, (int)m_height / 2, m_texcoords, m_cameraForward, m_cameraUp, m_cameraSide, m_pixelAngle, m_pixelDirs); if (m_adaptiveIterationCount && m_upsampling) { hipLaunchKernelGGL(( setPixelDirectionLow) , dim3(m_numBlocksForLow), dim3(m_numBlocksForLow), 0, 0, m_numPixels / 2, (int)m_width / 2, (int)m_height / 2, m_texcoordsLow, m_cameraForward, m_cameraUp, m_cameraSide, m_pixelAngle, m_pixelDirsLow); } hipDeviceSynchronize(); endTimer(); } { startTimer("Ray March"); #ifdef ENABLE_DOUBLE_PRECISION // Perform mandelbulb if (m_doublePrecision && !m_upsampling) { hipLaunchKernelGGL(( raymarchMandelbulbD_kernel<8>) , dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, m_pixelDirs, m_cameraPosition, m_initialDeltaStep, m_iterationAccelerator, m_initialIterations, false, m_pixelIterations, m_pixelDepths, m_pixelDeltaDepths); } else #endif { int numPixels = m_upsampling ? m_numPixels / 2 : m_numPixels; int numBlocks = m_upsampling ? m_numBlocksForLow : m_numBlocks; int numThreads = m_upsampling ? m_numThreadsForLow : m_numThreads; float3* dirs = m_upsampling ? m_pixelDirsLow : m_pixelDirs; int* iterations = m_upsampling ? m_pixelIterationsLow : m_pixelIterations; float* depths = m_upsampling ? m_pixelDepthsLow : m_pixelDepths; float* deltaDepths = m_upsampling ? 
m_pixelDeltaDepthsLow : m_pixelDeltaDepths; hipLaunchKernelGGL(( raymarchMandelbulb_kernel<8>) , dim3(numBlocks), dim3(numThreads), 0, 0, numPixels, dirs, m_cameraPosition, m_initialDeltaStep, m_iterationAccelerator, m_initialIterations, m_distanceEstimation, iterations, m_numRayMarchSteps, depths, deltaDepths); } hipDeviceSynchronize(); endTimer(); } if (m_adaptiveIterationCount && !m_doublePrecision) { startTimer("Adaptive Iteration"); // Drill down the surface by adaptive iteration counts int numPixels = m_upsampling ? m_numPixels / 2 : m_numPixels; int numBlocks = m_upsampling ? m_numBlocksForLow : m_numBlocks; int numThreads = m_upsampling ? m_numThreadsForLow : m_numThreads; int width = m_upsampling ? (int)m_width / 2 : (int)m_width; int height = m_upsampling ? (int)m_height / 2 : (int)m_height; int2* texcoords = m_upsampling ? m_texcoordsLow : m_texcoords; float3* dirs = m_upsampling ? m_pixelDirsLow : m_pixelDirs; float* depths = m_upsampling ? m_pixelDepthsLow : m_pixelDepths; float* deltaDepths = m_upsampling ? m_pixelDeltaDepthsLow : m_pixelDeltaDepths; float* depthDiffs = m_upsampling ? m_pixelDepthDiffsLow : m_pixelDepthDiffs; float* depthTmp = m_upsampling ? m_pixelDepthsTmpLow : m_pixelDepthsTmp; int* iterations = m_upsampling ? 
m_pixelIterationsLow : m_pixelIterations; hipBindTexture(0, s_depthDiffTexture, depthDiffs, numPixels * sizeof(float)); hipBindTexture(0, s_depthTexture, depths, numPixels * sizeof(float)); hipBindTexture(0, s_iterationTexture, iterations, numPixels * sizeof(int)); for (int i = 0; i < m_numDrillingIterations; ++i) { hipLaunchKernelGGL(( adaptiveIteration_kernel<8>), dim3(numBlocks), dim3(numThreads) , 0, 0, numPixels, dirs, m_cameraPosition, m_minimumIterations, m_iterationToDeltaStepRate, m_distanceEstimation, iterations, depths, depthTmp, deltaDepths, depthDiffs ); hipLaunchKernelGGL(( compareDepthDiffs_kernel), dim3(numBlocks), dim3(numThreads) , 0, 0, numPixels, width, height, m_laplacianThreshold, depthDiffs, depthTmp, texcoords, m_iterationToDeltaStepRate, m_mortonCurve, m_upsampling, iterations, depths, deltaDepths ); hipDeviceSynchronize(); } endTimer(); { startTimer("Finalize Adaptive Iteration"); hipLaunchKernelGGL(( finalizeIteration_kernel) , dim3(numBlocks), dim3(numThreads), 0, 0, numPixels, iterations ); hipDeviceSynchronize(); endTimer(); } if (m_upsampling) { startTimer("Upsample"); hipLaunchKernelGGL(( copyFromLowToHigh), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, (int)m_width, m_mortonCurve, m_texcoordsLow, m_pixelDepthsLow, m_pixelDeltaDepthsLow, m_pixelIterationsLow, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations ); hipBindTexture(0, s_deltaDepthTexture, deltaDepths, numPixels * sizeof(float)); hipDeviceSynchronize(); hipLaunchKernelGGL(( upsample<8>), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, m_cameraPosition, m_texcoordsLow, (int)m_width, (int)m_height, m_minimumIterations, m_iterationAccelerator, m_mortonCurve, m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations ); hipLaunchKernelGGL(( upsamplePhase1<8>), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, m_cameraPosition, (int)m_width / 2, (int)m_height / 2, (int)m_width, m_minimumIterations, 
m_iterationAccelerator, m_mortonCurve, m_texcoordsLow, m_pixelDepthsLow, m_pixelDeltaDepthsLow, m_pixelIterationsLow, m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations ); hipDeviceSynchronize(); hipUnbindTexture(s_depthTexture); hipUnbindTexture(s_deltaDepthTexture); hipUnbindTexture(s_iterationTexture); hipBindTexture(0, s_depthTexture, m_pixelDepths, m_numPixels * sizeof(float)); hipBindTexture(0, s_deltaDepthTexture, m_pixelDeltaDepths, m_numPixels * sizeof(float)); hipBindTexture(0, s_iterationTexture, m_pixelIterations, m_numPixels * sizeof(int)); hipLaunchKernelGGL(( upsamplePhase2<8>), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, (int)m_width, (int)m_height, m_minimumIterations, m_iterationAccelerator, m_mortonCurve, m_texcoordsLow, m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations); hipLaunchKernelGGL(( upsamplePhase3<8>), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, (int)m_width, (int)m_height, m_minimumIterations, m_iterationAccelerator, m_mortonCurve, m_texcoordsLow, m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations); hipUnbindTexture(s_depthTexture); hipUnbindTexture(s_deltaDepthTexture); hipUnbindTexture(s_iterationTexture); endTimer(); } else { hipUnbindTexture(s_iterationTexture); hipUnbindTexture(s_depthTexture); } hipUnbindTexture(s_depthDiffTexture); } { startTimer("Bisection Search"); // Perform bisection search to determine the final position of the surface #ifdef ENABLE_DOUBLE_PRECISION if (m_doublePrecision && !m_upsampling) { binaryPartitionSearchD<8> << <m_numBlocks, m_numThreads >> > ( m_numPixels, m_pixelDirs, m_pixelDeltaDepths, m_cameraPosition, m_pixelIterations, m_pixelPositions, m_pixelDepths); } else #endif { binaryPartitionSearch<8> << <m_numBlocks, m_numThreads >> > ( m_numPixels, m_pixelDirs, m_pixelDeltaDepths, m_cameraPosition, m_pixelIterations, m_pixelPositions, m_pixelDepths); } hipDeviceSynchronize(); endTimer(); } { 
startTimer("Update Parameters"); // Update initial iteration counts of the next step by taking minimum iterations of this step m_initialIterations = 0; cpyDeviceToHost((void*)m_pixelIterationsHost, (void*)(m_pixelIterations), m_numPixels * sizeof(int)); int m_initialIterations = INT_MAX; for (int i = 0; i < (int)m_numPixels; ++i) { if (m_pixelIterationsHost[i] > 0 && m_initialIterations > m_pixelIterationsHost[i]) { m_initialIterations = m_pixelIterationsHost[i]; } } // Update focal depth and initial delta step of the next step based on the result of this step if (m_needRecalculate) { float centerDepth; int offset; if (m_mortonCurve) { offset = (int)encodeMortonCurveHost(m_width / 2, m_height / 2); } else { offset = m_numPixels / 2 - m_width / 2; } cpyDeviceToHost((void*)&centerDepth, (void*)(m_pixelDepths + offset), sizeof(float)); if (centerDepth > s_minDepth && centerDepth < s_maxDepth) { m_focalDepth = centerDepth; m_initialDeltaStep = centerDepth * m_depthToDeltaStepRate; } m_needRecalculate = false; } endTimer(); } // Calculate normal if(m_normalMode != NoNormal) { startTimer("Calculate Normal"); hipBindTexture(0, s_positionTexture, m_pixelPositions, m_numPixels * sizeof(float3)); switch (m_normalMode) { case PseudoScreenSpace: hipLaunchKernelGGL(( samplePseudoScreenSpaceNormals_kernel), dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, (int)m_width, (int)m_height, m_texcoords, m_cameraPosition, m_mortonCurve, m_pixelNormals); break; case ScreenSpace: hipLaunchKernelGGL(( sampleScreenSpaceNormals_kernel), dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, (int)m_width, (int)m_height, m_texcoords, m_cameraPosition, m_mortonCurve, m_pixelNormals); break; } hipUnbindTexture(s_positionTexture); hipDeviceSynchronize(); endTimer(); } const unsigned int numLights = (unsigned int)m_lightPositionsHost.size(); { startTimer("Set Color"); if (!m_lightPositions && numLights > 0) { allocateArray((void**)&m_lightPositions, numLights * sizeof(float3)); 
cpyHostToDevice((void*)m_lightPositions, (void*)(&m_lightPositionsHost.front()), numLights * sizeof(float3)); } // Set colors setColorFromPos << <m_numBlocks, m_numThreads >> > ( m_numPixels, m_pixelPositions, m_pixelDepths, m_pixelNormals, m_cameraPosition, m_lightPositions, numLights, m_normalMode != NoNormal, m_coloringMode == Colorful, m_pixelColorsFloat); hipDeviceSynchronize(); endTimer(); } // Cast shadow if(m_castShadow) { startTimer("Cast Shadow"); castShadow<8> << <m_numBlocks, m_numThreads >> >( m_numPixels, m_minimumIterations, m_pixelPositions, m_pixelDepths, m_lightPositions, numLights, m_pixelColorsFloat); hipDeviceSynchronize(); endTimer(); } // Apply SSAO if(m_ssaoEnabled) { startTimer("SSAO"); hipBindTexture(0, s_depthTexture, m_pixelDepths, m_numPixels * sizeof(float)); hipLaunchKernelGGL(( applySSAO_kernel), dim3(m_numBlocks), dim3(m_numThreads) , 0, 0, m_numPixels, (int)m_width, (int)m_height, m_texcoords, m_mortonCurve, m_pixelColorsFloat ); hipUnbindTexture(s_depthTexture); hipDeviceSynchronize(); endTimer(); } // Convert color from float to byte { startTimer("Prepare Result"); hipLaunchKernelGGL(( colFloatToByte), dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, m_pixelColorsFloat, (int)m_width, m_mortonCurve, pixcelColorsHost); hipDeviceSynchronize(); endTimer(); } if (m_profile) { printf("--------------------\n"); clock_t endTimer = clock(); printMilliSeconds(timer, endTimer, "Total"); printf("--------------------\n"); m_profile = false; } } void MandelbulbRenderer::setMinimumIterations(int minItr) { m_minimumIterations = minItr; m_initialIterations = 0; } void MandelbulbRenderer::setIterationAccelerator(float factor) { m_iterationAccelerator = factor; m_initialIterations = 0; } void MandelbulbRenderer::enableAdaptiveIterations(bool enable) { m_adaptiveIterationCount = enable; if (m_adaptiveIterationCount) { m_minimumIterations = 3; allocateAdaptiveIterationsMemory(); } else { freeAdaptiveIterationsMemory(); } m_initialIterations 
= 0; } void MandelbulbRenderer::enableDistanceEstimation(bool enable) { m_distanceEstimation = enable; } void MandelbulbRenderer::enableUpsampling(bool enable) { m_upsampling = enable; if (m_upsampling) { allocateUpsamplingMemory(); } else { freeUpsamplingMemory(); } } void MandelbulbRenderer::enableMortonCurveIndexing(bool enable) { m_mortonCurve = enable; // Set up pixel coordinates hipLaunchKernelGGL(( MandelbulbCudaKernel::setTexcoords), dim3(m_numBlocks), dim3(m_numThreads), 0, 0, m_numPixels, (int)m_width, (int)m_height, m_mortonCurve, m_texcoords); } void MandelbulbRenderer::addLight(const float3& pos) { m_lightPositionsHost.push_back(pos); if (m_lightPositions) { freeArray((void**)m_lightPositions); } } void MandelbulbRenderer::setDepthToDeltaStepRate(float rate) { m_depthToDeltaStepRate = rate; m_initialDeltaStep = m_focalDepth * m_depthToDeltaStepRate; } // Allocate buffers void MandelbulbRenderer::allocateMemory() { const int pixelsTimeFloat3 = m_numPixels * sizeof(float3); const int pixelsTimeFloat = m_numPixels * sizeof(float); allocateArray((void**)&m_pixelColorsFloat, pixelsTimeFloat3); allocateArray((void**)&m_pixelDirs, pixelsTimeFloat3); allocateArray((void**)&m_pixelNormals, pixelsTimeFloat3); allocateArray((void**)&m_pixelPositions, pixelsTimeFloat3); allocateArray((void**)&m_texcoords, m_numPixels * sizeof(int2)); allocateArray((void**)&m_pixelDepths, pixelsTimeFloat); allocateArray((void**)&m_pixelDeltaDepths, pixelsTimeFloat); allocateArray((void**)&m_pixelIterations, m_numPixels * sizeof(int)); allocateArray((void**)&m_numRayMarchSteps, m_numPixels * sizeof(int)); m_pixelIterationsHost = new int[m_numPixels]; if (m_adaptiveIterationCount) { allocateAdaptiveIterationsMemory(); if (m_upsampling) { allocateUpsamplingMemory(); } } } void MandelbulbRenderer::allocateAdaptiveIterationsMemory() { if (m_pixelDepthDiffs) { // Already allocated return; } const int pixelsTimeFloat = m_numPixels * sizeof(float); allocateArray((void**)&m_pixelDepthDiffs, 
pixelsTimeFloat); allocateArray((void**)&m_pixelDepthsTmp, pixelsTimeFloat); } void MandelbulbRenderer::allocateUpsamplingMemory() { if (m_pixelDirsLow) { // Already allocated return; } freeAdaptiveIterationsMemory(); int numPixels = m_numPixels / 2; const int pixelsTimeFloat3 = numPixels * sizeof(float3); const int pixelsTimeFloat = numPixels * sizeof(float); allocateArray((void**)&m_pixelDirsLow, pixelsTimeFloat3); allocateArray((void**)&m_pixelDepthsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelDeltaDepthsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelIterationsLow, numPixels * sizeof(int)); allocateArray((void**)&m_pixelDepthDiffsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelDepthsTmpLow, pixelsTimeFloat); allocateArray((void**)&m_texcoordsLow, numPixels * sizeof(int2)); hipLaunchKernelGGL(( MandelbulbCudaKernel::setTexcoords), dim3(m_numBlocksForLow), dim3(m_numThreadsForLow), 0, 0, numPixels, (int)m_width / 2, (int)m_height, m_mortonCurve, m_texcoordsLow); } void MandelbulbRenderer::freeMemory() { freeArray((void**)&m_pixelColorsFloat); freeArray((void**)&m_pixelDirs); freeArray((void**)&m_pixelNormals); freeArray((void**)&m_pixelPositions); freeArray((void**)&m_texcoords); freeArray((void**)&m_pixelDepths); freeArray((void**)&m_pixelDeltaDepths); freeArray((void**)&m_pixelIterations); freeArray((void**)&m_numRayMarchSteps); delete[] m_pixelIterationsHost; m_pixelIterationsHost = nullptr; } void MandelbulbRenderer::freeAdaptiveIterationsMemory() { if (m_pixelDepthDiffs) { freeArray((void**)&m_pixelDepthDiffs); freeArray((void**)&m_pixelDepthsTmp); } } void MandelbulbRenderer::freeUpsamplingMemory() { if (m_pixelDirsLow) { freeArray((void**)&m_pixelDirsLow); freeArray((void**)&m_pixelDepthsLow); freeArray((void**)&m_pixelDeltaDepthsLow); freeArray((void**)&m_pixelDepthDiffsLow); freeArray((void**)&m_pixelDepthsTmpLow); freeArray((void**)&m_texcoordsLow); } } void MandelbulbRenderer::startTimer(const char* timerName) { if (m_profile) { 
m_startTimer = clock(); m_timerName = timerName; } } void MandelbulbRenderer::endTimer() const { if (m_profile) { clock_t endTimer = clock(); printMilliSeconds(m_startTimer, endTimer, m_timerName); } } void MandelbulbRenderer::printMilliSeconds(const clock_t& c0, const clock_t& c1, const char* name) const { const clock_t deltaClock = c1 - c0; float msec = deltaClock * 1000.f / (float)CLOCKS_PER_SEC; printf("%s : %f msec \n", name, msec); }
bbb26cd01c790c8f7fc4df3d17ad43e444686c55.cu
//
// Copyright (c) 2018 Kohei Nagasawa
// Read LICENSE.md for license condition of this software
//

#include "mandelbulbRenderer.cuh"
#include "cudaUtils.cuh"

#ifdef ENABLE_DOUBLE_PRECISION
#include "double3.h"
#endif

#include <cmath>
#include <stdio.h>
#include <time.h>

#define FLOAT_MAX 3.402823466e+38F
#define ANALYTICAL_DISTANCE_ESTIMATION

using namespace CudaUtils;

// Constants
static constexpr float s_sphereRadiusSquared = 1.22f;   // bounding sphere (squared radius) containing the fractal
static constexpr float s_mandelEvalThreshold = 2.f;     // orbit escape radius
static constexpr float s_rayAcceleration = 0.000002f;
static constexpr float s_deltaPower = 2000000.0f;
static constexpr float s_maxDepth = 1.5f;
static constexpr float s_minDepth = 0.0002f;
static constexpr int s_numBinaryIterations = 6;
static constexpr int s_numIterationsOnHost = 20;

// Tables for Morton curve (bit-interleave masks), host and device copies
static constexpr unsigned int s_mortonMasksHost[] =
{
    0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF, 0x0000FFFF
};
__device__ const unsigned int s_mortonMasks[] =
{
    0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF, 0x0000FFFF
};

// Texture buffers
texture<float, 1, cudaReadModeElementType> s_positionTexture;
texture<float, 1, cudaReadModeElementType> s_depthTexture;
texture<float, 1, cudaReadModeElementType> s_deltaDepthTexture;
texture<float, 1, cudaReadModeElementType> s_depthDiffTexture;
texture<int, 1, cudaReadModeElementType> s_iterationTexture;

// Interleave the bits of (xPos, yPos) into a Morton (Z-order) index.
// Shared between the host and device implementations via the MASK table.
#define ENCODE_MORTON_CURVE(MASK) \
    unsigned int x = xPos; \
    unsigned int y = yPos; \
    x = (x | (x << 8)) & MASK[3]; \
    x = (x | (x << 4)) & MASK[2]; \
    x = (x | (x << 2)) & MASK[1]; \
    x = (x | (x << 1)) & MASK[0]; \
    y = (y | (y << 8)) & MASK[3]; \
    y = (y | (y << 4)) & MASK[2]; \
    y = (y | (y << 2)) & MASK[1]; \
    y = (y | (y << 1)) & MASK[0]; \
    const unsigned int result = x | (y << 1); \
    return result

// Encode Morton curve on CPU
unsigned int encodeMortonCurveHost(unsigned short xPos, unsigned short yPos)
{
    ENCODE_MORTON_CURVE(s_mortonMasksHost);
}

// Evaluate Mandelbulb at the given position on host CPU.
// Returns true when the orbit stays bounded for the full iteration budget
// (the point is treated as surface), false when it escapes or degenerates.
// iterationLeft reports how many iterations remained at escape (0 on a hit).
template <int N>
bool evalMandelbulbOnHost(const float3& pos, int numIterations, int& iterationLeft)
{
    float3 hc = pos;
    float r;

    // Evaluate mandelbulb at the point with the given iteration count
    for (int i = 0; i < numIterations; i++)
    {
        if (hc.x == 0)
        {
            // phi is undefined at hc.x == 0; treat as a miss, matching the
            // device-side evalMandelbulb<>() which returns false here.
            iterationLeft = numIterations - i;
            return false;
        }

        r = sqrt(hc.x * hc.x + hc.y * hc.y + hc.z * hc.z);
        if (r > s_mandelEvalThreshold)
        {
            // r is diverged, which means we will never hit the surface.
            // Abort iteration.
            // FIX: the original fell through here without returning, so the
            // function unconditionally reported a hit at the bottom.
            iterationLeft = numIterations - i;
            return false;
        }

        // z -> z^N + c in spherical coordinates
        float phi = atan(hc.y / hc.x);
        float theta = acos(hc.z / r);
        r = pow(r, N);
        theta = N * theta;
        phi = N * phi;
        const float sinth = sin(theta) * r;
        hc.x = sinth * cos(phi);
        hc.y = sinth * sin(phi);
        hc.z = cos(theta) * r;

        hc = hc + pos;
    }

    // The orbit did not diverge within the iteration count.
    // This point is considered part of the Mandelbulb surface.
    iterationLeft = 0;
    return true;
}

// Perform ray marching from the center of view on CPU (debug/profiling aid:
// prints the depth and iteration count of every step).
void MandelbulbRenderer::rayMarchOnHost(float initialDeltaDepth)
{
    printf("----- Begin Ray Marching -----\n");

    const float3& direction = m_cameraForward;
    float3 originalPosition = m_cameraPosition;
    {
        const float cameraDistanceSquared = dot(originalPosition, originalPosition);
        if (cameraDistanceSquared >= s_sphereRadiusSquared)
        {
            // Camera is outside of the radius.
            // Ray should start from sphere's surface of the radius
            originalPosition = originalPosition + direction * sqrt(cameraDistanceSquared - s_sphereRadiusSquared);
        }
    }

    float3 pos = originalPosition;
    float deltaDepth = initialDeltaDepth > 0 ?
initialDeltaDepth : m_initialDeltaStep; // Get iteration count const int numIterations = 10; while (1) { // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb bool result; int iterationLeft; { // Evaluate Mandelbulb result = evalMandelbulbOnHost<8>(pos, numIterations, iterationLeft); } // Calculate the current depth float depth = dot(pos - originalPosition, direction); printf("%f,%d\n", depth, numIterations - iterationLeft); if (result) { // We got a hit! printf("----- Hit : End of Ray Marching -----\n"); return; } // Update depth and position pos = pos + direction * deltaDepth; } printf("----- No Hit : End of Ray Marching -----\n"); return; } namespace MandelbulbCudaKernel { // Get pixel index __device__ unsigned int getPixelIndex() { return blockIdx.x * blockDim.x + threadIdx.x; } // Encode pixel coordenates into morton curve index __device__ unsigned int encodeMortonCurve(unsigned short xPos, unsigned short yPos) { ENCODE_MORTON_CURVE(s_mortonMasks); } // Decode morton curve index into pixel coordinates __device__ void decodeMortonCurve(unsigned int morton, unsigned short& xPos, unsigned short& yPos) { unsigned int x = morton & s_mortonMasks[0]; unsigned int y = (morton & (s_mortonMasks[0] << 1)) >> 1; x = (x | x >> 1) & s_mortonMasks[1]; x = (x | x >> 2) & s_mortonMasks[2]; x = (x | x >> 4) & s_mortonMasks[3]; x = (x | x >> 8) & s_mortonMasks[4]; y = (y | y >> 1) & s_mortonMasks[1]; y = (y | y >> 2) & s_mortonMasks[2]; y = (y | y >> 4) & s_mortonMasks[3]; y = (y | y >> 8) & s_mortonMasks[4]; xPos = x; yPos = y; } // Get index of pixel from its coordinate __device__ unsigned int getIndex(int x, int y, int width, bool mortonCurve) { if (mortonCurve) { return encodeMortonCurve((unsigned short)x, (unsigned short)y); } else { return x + y * width; } } // Get index of neighbor pixel __device__ unsigned int getNeighborIndex(unsigned int index, int xOffset, int yOffset, int width, bool mortonCurve) { if 
(mortonCurve) { unsigned short x, y; decodeMortonCurve(index, x, y); return encodeMortonCurve(x + (unsigned short)xOffset, y + (unsigned short)yOffset); } else { return index + xOffset + width * yOffset; } } // Set coordinates of each pixcel __global__ void setTexcoords(unsigned int numPixcels, int width, int height, bool mortonCurve, int2* texcoords) { const unsigned int id = getPixelIndex(); if (id < numPixcels) { int2& cord = texcoords[id]; if (mortonCurve) { unsigned short x, y; decodeMortonCurve(id, x, y); cord.x = x; cord.y = y; } else { const int widthIndex = id % width; const int heightIndex = (id - widthIndex) / width; cord.x = widthIndex; cord.y = heightIndex; } } } // Rotates a vector by angle around axis __device__ float3 rotate(const float3& vector, const float3& axis, const float angle) { float3 vectorOut; const float c = cos(angle); const float s = sin(angle); const float cosx = (1 - c) * axis.x; const float cosy = (1 - c) * axis.y; const float cosz = (1 - c) * axis.z; const float sinx = s * axis.x; const float siny = s * axis.y; const float sinz = s * axis.z; const float cosxy = cosx * axis.y; const float cosxz = cosx * axis.z; const float cosyz = cosy * axis.z; vectorOut.x = (c + cosx * axis.x) * vector.x + (cosxy - sinz) * vector.y + (cosxz + siny) * vector.z; vectorOut.y = (cosxy + sinz) * vector.x + (c + cosy * axis.y) * vector.y + (cosyz - sinx) * vector.z; vectorOut.z = (cosxz - siny) * vector.x + (cosyz + sinx) * vector.y + (c + cosz * axis.z) * vector.z; return vectorOut; } // Set ray marching directions for each pixel __global__ void setPixelDirection( const unsigned int numPixels, const int halfWidth, const int halfHeight, const int2* texcoords, const float3 forwardDir, const float3 upDir, const float3 sideDir, const float dagl, float3* pixelDirs) { const unsigned int id = getPixelIndex(); if (id < numPixels) { const int2& cord = texcoords[id]; const float xAngle = (cord.x - halfWidth) * dagl; const float yAngle = (cord.y - halfHeight) * 
dagl; float3& pixelDir = pixelDirs[id]; pixelDir = rotate(forwardDir, upDir, xAngle); pixelDir = rotate(pixelDir, sideDir, yAngle); pixelDir = normalize(pixelDir); } } // Set ray marching directions for each pixel __global__ void setPixelDirectionLow( const unsigned int numPixels, const int halfWidth, const int halfHeight, const int2* texcoords, const float3 forwardDir, const float3 upDir, const float3 sideDir, const float dagl, float3* pixelDirs) { const unsigned int id = getPixelIndex(); if (id < numPixels) { const int2& cord = texcoords[id]; float xAngle; if (cord.y % 2 == 0) { xAngle = (2 * cord.x - halfWidth) * dagl; } else { xAngle = (2 * cord.x + 1 - halfWidth) * dagl; } const float yAngle = (cord.y - halfHeight) * dagl; float3& pixelDir = pixelDirs[id]; pixelDir = rotate(forwardDir, upDir, xAngle); pixelDir = rotate(pixelDir, sideDir, yAngle); pixelDir = normalize(pixelDir); } } // Actual evaluation of mandelbulb at given position // Depth is used to determined the number of iterations // Returns 1 if we hit mandelbulb or negative value if we didn't // The smaller the returned value is, the sooner iteration ended // which means the further the point is from the surface template <int N> __device__ bool evalMandelbulb(const float3& pos, const int numIterations, int& iterationLeft, float& potential, float& dr) { float3 hc = pos; float r = 0.0f; dr = 1.0f; // Evaluate mandelbulb at the point with the given iteration count for (int i = 0; i < numIterations; i++) { if (hc.x == 0) { iterationLeft = numIterations - i; potential = log(r) / pow((float)N, float(i)); dr = 0.5f * log(r) * r / dr; return false; } r = sqrt(dot(hc, hc)); if (r > s_mandelEvalThreshold) { // r is diverged, which means we will never hit the surface. // Abort iteration. 
iterationLeft = numIterations - i; potential = log(r) / pow((float)N, float(i)); dr = 0.5f * log(r) * r / dr; return false; } if (hc.x != 0) { float phi = atan(hc.y / hc.x); float theta = acos(hc.z / r); r = pow(r, N); theta = N * theta; phi = N * phi; const float sinth = sin(theta) * r; hc.x = sinth * cos(phi); hc.y = sinth * sin(phi); hc.z = cos(theta) * r; } hc += pos; dr = pow(r, N - 1) * (float)N * dr + 1.0f; } // We didn't diverged withint the iteration count. // Then this point is considered as surface of mandelbulb. iterationLeft = 0; potential = 0; return true; } template <int N> __device__ float potential(int numIteration, const float3& pos) { float3 hc = pos; float r; for (int i = 0; i < numIteration; ++i) { r = sqrt(dot(hc, hc)); if (r > s_mandelEvalThreshold) { return log(r) / pow((float)N, float(i)); } float theta = acos(hc.z / r); float phi = atan(hc.y / hc.x); float zr = pow(r, (float)N); theta = theta*(float)N; phi = phi*(float)N; const float sinth = sin(theta) * zr; hc.x = sinth * cos(phi); hc.y = sinth * sin(phi); hc.z = cos(theta) * zr; hc = pos + hc; } return 0; } // Calculate the number of iterations used for Mandelbulb evaluation __device__ int getNumIterations(float depth, float iterationAccelerator, int minimumIterations) { return (int)(-log(pow(depth, iterationAccelerator)) / log(50.f)) + minimumIterations; } // Evaluate Mandelbulb by ray marching template <int N> __device__ bool rayMarchWithAcceleration( float3 pos, const float3& direction, float iterationAccelerator, int minimumIterations, int fixedIteration, bool distanceEstimation, int& pixelIteration, int& numSteps, float& depth, float& deltaDepth) { float rayAccelerator = 0; float pot, dr; int currentIteration = -1; int iterationLeft; bool result; numSteps = 0; while (1) { ++numSteps; // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb { currentIteration = fixedIteration > 0 ? 
fixedIteration : getNumIterations(depth, iterationAccelerator, minimumIterations); // Evaluate Mandelbulb result = evalMandelbulb<N>(pos, currentIteration, iterationLeft, pot, dr); // Update iteration count of this pixel pixelIteration = currentIteration; // [todo] Review and validate this code //if (!result && iterationLeft > 1) //{ // // Iteration count is too big. Make it smaller // minimumIterations = currentIteration - iterationLeft + 1; //} } if (result) { // We got a hit! return true; } // Update depth and position float delta = deltaDepth; float acceleration = s_rayAcceleration; if (distanceEstimation) { #ifndef ANALYTICAL_DISTANCE_ESTIMATION // Approximate derivative by gradient const float eps = 0.01f * deltaDepth; float3 posx = pos; posx.x += eps; float3 posy = pos; posy.y += eps; float3 posz = pos; posz.z += eps; float3 grad; grad.x = potential<N>(currentIteration, posx); grad.y = potential<N>(currentIteration, posy); grad.z = potential<N>(currentIteration, posz); grad = (grad - pot) / eps; float newDelta = max(delta, (0.5f / exp(pot))*sinh(pot) / sqrt(dot(grad, grad))); #else // Analytical solution float newDelta = max(delta, dr); #endif acceleration *= newDelta / delta; delta = newDelta; } depth += delta; pos = pos + direction * delta; // Accelerate the ray gradually while it gets far from the camera rayAccelerator += acceleration; deltaDepth *= pow(s_deltaPower, rayAccelerator + iterationLeft * acceleration); // [todo] Investigate this calculation } return false; } // Perform ray marching against mandelbulb // and store depth to mandelbulb and delta depth of ray marching for each pixel template <int N> __global__ void raymarchMandelbulb_kernel( const unsigned int numPixels, const float3* pixelDirs, const float3 cameraPosition, float deltaDepth, float iterationAccelerator, int minimumIterations, bool distanceEstimation, int* pixelIterations, int* numRayMarchSteps, float* pixelDepths, float* pixelDeltaDepths) { const unsigned int id = getPixelIndex(); 
if (id >= numPixels) { return; } // Reset iteration count of mandelbulb evaluation pixelIterations[id] = 0; float3 pos; const float3& direction = pixelDirs[id]; // First, check if we need to evaluate mandelbulb from the first place bool shouldEval = false; { const float cameraDistanceSquared = dot(cameraPosition, cameraPosition); const float projection = dot(cameraPosition, direction); if (projection > 0) { // Camera is facing the opposite way of the origin // We don't need to evaluate at all if the camera is outside of the radius if (cameraDistanceSquared < s_sphereRadiusSquared) { // If the camera is inside the radius, ray should start from there pos = cameraPosition + deltaDepth * direction; shouldEval = true; } } else { // Camera is facing to mandelbulb // Now check if the direction of ray can possibly hit mandelbulb const float dist = cameraDistanceSquared - projection * projection; if (dist < s_sphereRadiusSquared) { // Ray can hit mandelbulb // Now determine the start position of the ray if (cameraDistanceSquared < s_sphereRadiusSquared) { // If the camera is inside the radius, ray should start from there pos = cameraPosition + deltaDepth * direction; } else { // Camera is outside of the radius. // Ray should start from sphere's surface of the radius float d1 = -projection; float d2 = sqrt(s_sphereRadiusSquared - dist); pos = cameraPosition + direction * (d1 - d2 + deltaDepth); } shouldEval = true; } } } // Secondly, evaluate mandelbulb if needed if (shouldEval) { // Calculate the current depth float depth = dot(pos - cameraPosition, direction); // Perform ray marching bool hit = rayMarchWithAcceleration<N>( pos, direction, iterationAccelerator, minimumIterations, -1, distanceEstimation, pixelIterations[id], numRayMarchSteps[id], depth, deltaDepth); if (hit) { // We hit mandelbulb. 
// Store depth and delta depth of the hit pixelDepths[id] = depth; pixelDeltaDepths[id] = deltaDepth; return; } } // No hit pixelDepths[id] = FLOAT_MAX; pixelDeltaDepths[id] = 1.0f; pixelIterations[id] = 0; return; } // Perform bisection search to get surface position with higher precision template <int N> __global__ void binaryPartitionSearch( const unsigned int numPixels, const float3* pixelDirs, const float* pixelDeltaDepths, const float3 cameraPosition, const int* pixelIterations, float3* pixelPositions, float* pixelDepths) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } float depth = pixelDepths[id]; if (depth != FLOAT_MAX) { // We have hit mandelbulb at this pixel const float3& dir = pixelDirs[id]; const float deltaDepth = pixelDeltaDepths[id]; float currentDelta = deltaDepth; // Move backward one step float3 pos = cameraPosition + dir * (depth - deltaDepth); // Perform bi-section search for (int i = 0; i < s_numBinaryIterations; i++) { // Move half way currentDelta *= 0.5f; pos = pos + dir * currentDelta; depth += currentDelta; int numIterations = pixelIterations[id]; int iterationLeft; float pot, grad; if (evalMandelbulb<N>(pos, numIterations, iterationLeft, pot, grad)) { // We got a hit // Revert the last step and test again with smaller delta pos = pos - dir * currentDelta; depth -= currentDelta; } } // Store the result pixelDepths[id] = depth + currentDelta; pixelPositions[id] = pos + dir * currentDelta; } else { // There was no hit // Just set invalid position pixelPositions[id] = { FLOAT_MAX, FLOAT_MAX, FLOAT_MAX }; } } // Drill down mandelbulb's surface by changing iteration count adaptively based on smoothness of the surface // This kernel should be followed by compareDepthDiffs_kernel template <int N> __global__ void adaptiveIteration_kernel( const unsigned int numPixels, const float3* pixelDirs, const float3 cameraPosition, int minimumIterations, float iterationToDeltaStepRate, bool distanceEstimation, int* 
pixelIterations, float* pixelDepths, float* pixelDepthsTmp, float* pixelDeltaDepths, float* depthDiffs) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } if (pixelIterations[id] <= 0) { // This pixel is already done. Abort further drilling. return; } // Increment iteration count by one int currentIteration = pixelIterations[id] + 1; const float3& dir = pixelDirs[id]; float deltaDepth = pixelDeltaDepths[id] * iterationToDeltaStepRate; float depth = pixelDepths[id]; float3 pos = cameraPosition + dir * depth; bool hit = false; // Perform ray marching with the new iteration count but without ray acceleration while (1) { // Check if we are still inside the radius if (dot(pos, pos) > s_sphereRadiusSquared) { break; } // Evaluate mandelbulb int iterationLeft; float pot, dr; bool result = evalMandelbulb<N>(pos, currentIteration, iterationLeft, pot, dr); if (result) { // We hit the surface hit = true; break; } // [todo] Review and validate this code // //else if (iterationLeft > 1) //{ // // We didn't hit the surface and the iteration count is too big now. // // This means the ray is now moving aways from the surface. // // Reduce the iteration count and perform ray march with acceleration again. 
// // Calculate new iteration count and update delta depth // int numIterations = max(currentIteration - iterationLeft + 1, minimumIterations); // deltaDepth *= float(iterationLeft - 1) / iterationToDeltaStepRate; // // Perform ray marching // int dummy; // hit = rayMarchWithAcceleration<N>( // pos, // dir, // 0.f, // 0, // numIterations, // distanceEstimation, // dummy, // depth, // deltaDepth // ); // if (hit) // { // // We hit a new surface // // Continue the loop from there with new parameters // pos = cameraPosition + dir * depth; // pixelDepths[id] = depth; // pixelDeltaDepths[id] = deltaDepth; // pixelIterations[id] = numIterations; // currentIteration = numIterations + 1; // deltaDepth *= iterationToDeltaStepRate; // hit = false; // isFirstStep = true; // //continue; // } // // We didn't hit anything. // // Just break the loop to make this pixel invalid. // //break; //} // Update depth and position and evaluate mandelbulb again float delta = deltaDepth; if (distanceEstimation) { #ifndef ANALYTICAL_DISTANCE_ESTIMATION // Approximate derivative by gradient const float eps = 0.01f * deltaDepth; float3 posx = pos; posx.x += eps; float3 posy = pos; posy.y += eps; float3 posz = pos; posz.z += eps; float3 grad; grad.x = potential<N>(currentIteration, posx); grad.y = potential<N>(currentIteration, posy); grad.z = potential<N>(currentIteration, posz); grad = (grad - pot) / eps; float newDelta = max(delta, (0.5f / exp(pot))*sinh(pot) / sqrt(dot(grad, grad))); #else // Analytical solution float newDelta = max(delta, dr); #endif delta = newDelta; } depth += delta; pos = pos + dir * delta; } if (hit) { // When we hit a new surface with incremented iteration count, set depth difference from the original and temporary depth depthDiffs[id] = (depth - pixelDepths[id]) / depth; pixelDepthsTmp[id] = depth; } else { // We didn't hit any surface with the new iteration // Just set invalid values pixelDepths[id] = FLOAT_MAX; depthDiffs[id] = FLOAT_MAX; pixelIterations[id] = 0; } 
} // Compare depth difference of nearby pixels and calculate screen space laplacian of it to determine // if the surface is still smooth enough to drill down further __global__ void compareDepthDiffs_kernel( const unsigned int numPixels, const int width, const int height, const float laplacianThreshold, const float* pixelDepthDiffs, const float* pixelDepthsTmp, const int2* texcoords, float iterationToDeltaStepRate, bool mortonCurve, bool upsampling, int* pixelIterations, float* pixelDepths, float* pixelDeltaDepths) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const float depth = pixelDepths[id]; if (depth == FLOAT_MAX) { return; } const float diff = tex1Dfetch(s_depthDiffTexture, id); const float depthPlusDiff = depth + diff * pixelDepthsTmp[id]; const int2& cord = texcoords[id]; const int x = cord.x; const int y = cord.y; // Threshold of difference of depth between two pixels to calculate second derivative of depth diff // [todo] investigate if we need this condiyion //const float threshold = FLOAT_MAX; //const float invDepth = 1.f / depthPlusDiff; // Gradient of depth difference at neighbor pixels float gradDiffX1, gradDiffX2; float gradDiffY1, gradDiffY2; const int neighborOffsetX = upsampling ? 1 : 2; const int neighborOffsetY = 2; // [todo] Laplacian should be calculated only based on neighbors with the same iteration count as this pixel. // We are not checking iteration count of neighbors now but we should. 
unsigned int neighbor; float neighborDiff; // Left pixel if (x > 1) { neighbor = getNeighborIndex(id, -neighborOffsetX, 0, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffX1 = diff - neighborDiff; } //else //{ // gradDiffX1 = 0; //} } else { gradDiffX1 = 0; } // Right pixel if (x < width - neighborOffsetX) { neighbor = getNeighborIndex(id, neighborOffsetX, 0, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffX2 = neighborDiff - diff; } //else //{ // gradDiffX2 = 0; //} } else { gradDiffX2 = 0; } // Up pixel if (y > 1) { neighbor = getNeighborIndex(id, 0, -neighborOffsetY, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffY1 = diff - neighborDiff; } //else //{ // gradDiffY1 = 0; //} } else { gradDiffY1 = 0; } // Down pixel if (y < height - neighborOffsetY) { neighbor = getNeighborIndex(id, 0, neighborOffsetY, width, mortonCurve); //if (fabs(depth - tex1Dfetch(s_depthTexture, neighbor)) * invDepth < threshold) { neighborDiff = tex1Dfetch(s_depthDiffTexture, neighbor); gradDiffY2 = neighborDiff - diff; } //else //{ // gradDiffY2 = 0; //} } else { gradDiffY2 = 0; } // Calculate laplacian of depth difference const float laplacian = (gradDiffX2 - gradDiffX1) + (gradDiffY2 - gradDiffY1); if (fabs(laplacian) < laplacianThreshold) { // If laplacian is smaller than the threshold, it means the surface is smooth enough to drill down one more iteration pixelIterations[id] += 1; pixelDepths[id] = depthPlusDiff; pixelDeltaDepths[id] *= iterationToDeltaStepRate; } else { // Otherwise, negate the iteration count to indicate we cannot drill down this pixel any further pixelIterations[id] = -pixelIterations[id]; } } // Finalize 
// data after adaptive drilling down: restore the sign of iteration counts
// that compareDepthDiffs_kernel negated to mark "cannot drill further".
__global__ void finalizeIteration_kernel(
    const unsigned int numPixels,
    //const int width,
    int* pixelIterations)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels)
    {
        return;
    }

    // Set negative iteration count back to the original positive value
    if (pixelIterations[id] < 0)
    {
        pixelIterations[id] = -pixelIterations[id];
    }

    // [todo] A neighborhood average of nearby iteration counts used to be
    // taken here (5x5 window, ignoring neighbors differing by >= 3); it is
    // disabled because it seemed to only increase artifacts. Investigate
    // whether it is needed before re-enabling.
}

// Copy depth / delta-depth / iteration data from the half-resolution
// (low) buffers into their corresponding pixels of the full-resolution
// buffers. Low-res pixels map to alternating columns per row.
__global__ void copyFromLowToHigh(
    const unsigned int numPixels,
    int width,
    bool mortonCurve,
    const int2* texcoordsLow,
    const float* pixelDepthsLow,
    const float* pixelDeltaDepthsLow,
    const int* pixelIterationsLow,
    float* pixelDepths,
    float* pixelDeltaDepths,
    int* pixelIterations)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels)
    {
        return;
    }

    // Calculate pixel index of high resolution buffers.
    // Even rows fill even columns, odd rows fill odd columns (checkerboard).
    const int2& texcoord = texcoordsLow[id];
    const int x = texcoord.y % 2 == 0 ? texcoord.x * 2 : texcoord.x * 2 + 1;
    unsigned int highResId = getIndex(x, texcoord.y, width, mortonCurve);

    pixelDepths[highResId] = pixelDepthsLow[id];
    pixelDeltaDepths[highResId] = pixelDeltaDepthsLow[id];
    // FIX: the original read pixelIterations[id] (the destination buffer,
    // indexed by the low-res id) and never used the pixelIterationsLow
    // parameter; the iteration count must come from the low-res source.
    pixelIterations[highResId] = pixelIterationsLow[id];
}

// Sample depth/delta-depth/iteration of one neighbor (via the bound
// textures) and fold it into the running closest-depth / max-iteration
// accumulators.
__device__ void getValuesFromClosestNeighbor(
    const unsigned int neighbor,
    float& closestDepth,
    float& closestDeltaDepth,
    int& closestIteration,
    int& maxIteration
    )
{
    float depth = tex1Dfetch(s_depthTexture, neighbor);
    int iteration = tex1Dfetch(s_iterationTexture, neighbor);
    if (depth < closestDepth)
    {
        closestDepth = depth;
        closestIteration = iteration;
        closestDeltaDepth = tex1Dfetch(s_deltaDepthTexture, neighbor);
    }
    if (iteration > maxIteration)
    {
        maxIteration = iteration;
    }
}

// Fill the missing checkerboard pixels of the full-resolution buffers from
// the closest of their low-resolution neighbors, then refine by marching.
// NOTE(review): cameraPosition is declared as a reference parameter on a
// __global__ kernel — confirm callers pass something device-dereferenceable.
template <int N>
__global__ void upsample(
    const unsigned int numPixels,
    const float3& cameraPosition,
    const int2* texcoordsLow,
    int width,
    int height,
    int minimumIterations,
    float iterationAccelerator,
    bool mortonCurve,
    const float3* pixelDirs,
    float* pixelDepths,
    float* pixelDeltaDepths,
    int* pixelIterations)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels)
    {
        return;
    }

    const int2& texcoord = texcoordsLow[id];
    const int x = texcoord.x;
    const int y = texcoord.y;

    float closestDepth = FLOAT_MAX;
    float closestDeltaDepth = 0;
    int closestIteration = 0;
    int maxIteration = 0;
    {
        const int halfWidth = width / 2;
        unsigned int neighbor;
        const int oddOffset = y % 2 == 0 ?
0 : 1; // Left pixel if(oddOffset || x > 0) { neighbor = getNeighborIndex(id, -oddOffset, 0, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Right pixel if(!oddOffset || x + 1 < halfWidth) { neighbor = getNeighborIndex(id, 1-oddOffset, 0, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Up pixel if(y > 0) { neighbor = getNeighborIndex(id, 0, -1, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } // Down pixel if (y + 1 < height) { neighbor = getNeighborIndex(id, 0, 1, halfWidth, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } const unsigned int highResId = getIndex(y % 2 == 0 ? 2 * x + 1 : 2 * x, y, width, mortonCurve); const float3& dir = pixelDirs[highResId]; float3 pos = cameraPosition + dir * closestDepth; int iterationLeft; float potential, dr; int res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); if (res > 0) { while (res > 0) { closestDepth -= closestDeltaDepth; pos -= dir * closestDeltaDepth; res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); } } else { closestDepth = closestDepth + closestDeltaDepth; pos += dir * closestDeltaDepth; int pixelIteration; rayMarchWithAcceleration<N>( pos, dir, iterationAccelerator, minimumIterations, -1, false, pixelIteration, closestIteration, closestDepth, closestDeltaDepth); } pixelDepths[highResId] = closestDepth; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } // Upsample template <int N> __global__ void upsamplePhase1( const unsigned int numPixels, const float3& cameraPosition, int widthLow, int heightLow, int width, int minimumIterations, float iterationAccelerator, bool mortonCurve, const 
int2* texcoordsLow, const float* pixelDepthsLow, const float* pixelDeltaDepthsLow, const int* pixelIterationsLow, const float3* pixelDirs, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations) { const unsigned int id = getPixelIndex(); if (id >= numPixels) { return; } const int2& texcoord = texcoordsLow[id]; const int baseX = texcoord.x; const int baseY = texcoord.y; float closestDepth = FLOAT_MAX; float closestDeltaDepth = 0; int closestIteration = 0; int maxIteration = 0; { unsigned int neighbor; for (int i = 0; i <= 1; ++i) { if (baseX + i <= widthLow) { continue; } for (int j = 0; j <= 1; ++j) { if (baseY + j <= heightLow) { continue; } neighbor = getNeighborIndex(id, i, j, widthLow, mortonCurve); getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration); } } } const int x = 2 * baseX + 1; const int y = 2 * baseY + 1; const unsigned int highResId = getIndex(x, y, width, mortonCurve); const float3& dir = pixelDirs[highResId]; float3 pos = cameraPosition + dir * closestDepth; int iterationLeft; float potential, dr; int res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); if (res > 0) { while (res > 0) { closestDepth -= closestDeltaDepth; pos -= dir * closestDeltaDepth; res = evalMandelbulb<N>(pos, closestIteration, iterationLeft, potential, dr); } } else { closestDepth = closestDepth + closestDeltaDepth; pos += dir * closestDeltaDepth; int pixelIteration; rayMarchWithAcceleration<N>( pos, dir, iterationAccelerator, minimumIterations, -1, false, pixelIteration, closestIteration, closestDepth, closestDeltaDepth); } pixelDepths[highResId] = closestDepth; pixelDeltaDepths[highResId] = closestDeltaDepth; pixelIterations[highResId] = closestIteration; } template <int N> __global__ void upsamplePhase2( const unsigned int numPixels, int width, int height, int minimumIterations, float iterationAccelerator, bool mortonCurve, const int2* texcoordsLow, const float3* pixelDirs, float* 
pixelDepths, float* pixelDeltaDepths, int* pixelIterations)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    // Map a low-res texcoord onto the interleaved high-res pixel this phase fills in.
    const int2& texcoord = texcoordsLow[id];
    const int baseX = texcoord.x;
    const int baseY = texcoord.y;
    const int x = (baseY % 2 == 0) ? (2 * baseX + 1) : (2 * baseX);
    const int y = 2 * baseY;
    const unsigned int highResId = getIndex(x, y, width, mortonCurve);
    float closestDepth = FLOAT_MAX;
    float closestDeltaDepth = 0;
    int closestIteration = 0;
    int maxIteration = 0;
    {
        unsigned int neighbor;
        // Gather the closest (minimum-depth) neighbor's values from the 4-neighborhood.
        // Left pixel
        if (x > 0) {
            neighbor = getNeighborIndex(highResId, -1, 0, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Right pixel
        if (x + 1 < width) {
            neighbor = getNeighborIndex(highResId, 1, 0, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Up pixel
        if (y > 0) {
            neighbor = getNeighborIndex(highResId, 0, -1, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Down pixel
        if (y + 1 < height) {
            neighbor = getNeighborIndex(highResId, 0, 1, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
    }
    // Depth is reset to FLOAT_MAX so a later pass recomputes it; only the seeded
    // delta-depth and iteration count are kept from the closest neighbor.
    pixelDepths[highResId] = FLOAT_MAX;
    pixelDeltaDepths[highResId] = closestDeltaDepth;
    pixelIterations[highResId] = closestIteration;
}

// Upsampling phase 3: seeds the remaining interleaved high-res pixels
// (x = 2*baseX; y offset alternates with the parity of baseX) from their
// closest already-computed neighbors. Mirror of upsamplePhase2 above, which
// alternates the x offset with the parity of baseY instead.
template <int N>
__global__ void upsamplePhase3(
    const unsigned int numPixels, int width, int height, int minimumIterations,
    float iterationAccelerator, bool mortonCurve, const int2* texcoordsLow,
    const float3* pixelDirs, float* pixelDepths, float* pixelDeltaDepths, int* pixelIterations)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    const int2& texcoord = texcoordsLow[id];
    const int baseX = texcoord.x;
    const int baseY = texcoord.y;
    const int x = 2 * baseX;
    const int y = (baseX % 2 ==
0) ? (2 * baseY + 1) : (2 * baseY);
    const unsigned int highResId = getIndex(x, y, width, mortonCurve);
    float closestDepth = FLOAT_MAX;
    float closestDeltaDepth = 0;
    int closestIteration = 0;
    int maxIteration = 0;
    {
        unsigned int neighbor;
        // Left pixel
        if (x > 0) {
            neighbor = getNeighborIndex(highResId, -1, 0, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Right pixel
        if (x + 1 < width) {
            neighbor = getNeighborIndex(highResId, 1, 0, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Up pixel
        if (y > 0) {
            neighbor = getNeighborIndex(highResId, 0, -1, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
        // Down pixel
        if (y + 1 < height) {
            neighbor = getNeighborIndex(highResId, 0, 1, width, mortonCurve);
            getValuesFromClosestNeighbor(neighbor, closestDepth, closestDeltaDepth, closestIteration, maxIteration);
        }
    }
    pixelDepths[highResId] = FLOAT_MAX;
    pixelDeltaDepths[highResId] = closestDeltaDepth;
    pixelIterations[highResId] = closestIteration;
}

// Evaluate normal of pixels by simplified screen space calculation
__global__ void samplePseudoScreenSpaceNormals_kernel(
    const unsigned int numPixels, const int width, const int height,
    const int2* texcoords, const float3 cameraPos, bool mortonCurve, float3* pixelNormals)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    const unsigned int id3 = id * 3;
    // Get surface position (x/y/z interleaved in the bound position texture)
    float3 pos;
    pos.x = tex1Dfetch(s_positionTexture, id3);
    pos.y = tex1Dfetch(s_positionTexture, id3 + 1);
    pos.z = tex1Dfetch(s_positionTexture, id3 + 2);
    // FLOAT_MAX in x marks a ray that missed the surface; no normal needed.
    if (pos.x == FLOAT_MAX)
        return;
    // Get pixel coordinates
    const int2& texcoord = texcoords[id];
    const int x = texcoord.x;
    const int y = texcoord.y;
    float p1, p2;
    unsigned int neighborId;
    float3 nor = { 0.0f, 0.0f, 0.0f };
    // Right pixel (NOTE(review): offset (-1, 0) actually samples the x-1 /
    // left neighbor; the label here and below is mirrored vs. the upsample kernels)
    if (x > 0) {
neighborId = 3 * getNeighborIndex(id, -1, 0, width, mortonCurve);
        p1 = pos.x - tex1Dfetch(s_positionTexture, neighborId);
        p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2);
        nor.x += p2;
        nor.z += p1;
    }
    // Left pixel (NOTE(review): offset (+1, 0) is the x+1 / right neighbor;
    // label mirrored vs. the upsample kernels)
    if (x + 1 < width) {
        neighborId = 3 * getNeighborIndex(id, 1, 0, width, mortonCurve);
        p1 = pos.x - tex1Dfetch(s_positionTexture, neighborId);
        p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2);
        nor.x -= p2;
        nor.z -= p1;
    }
    // Up pixel
    if (y > 0) {
        neighborId = 3 * getNeighborIndex(id, 0, -1, width, mortonCurve);
        p1 = pos.y - tex1Dfetch(s_positionTexture, neighborId + 1);
        p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2);
        nor.y += p2;
        nor.z += p1;
    }
    // Down pixel
    if (y + 1 < height) {
        neighborId = 3 * getNeighborIndex(id, 0, 1, width, mortonCurve);
        p1 = pos.y - tex1Dfetch(s_positionTexture, neighborId + 1);
        p2 = pos.z - tex1Dfetch(s_positionTexture, neighborId + 2);
        nor.y -= p2;
        nor.z -= p1;
    }
    pixelNormals[id] = normalize(nor);
}

// Evaluate normal of pixels by screen space depth: normal = normalize(dx x dy)
// where dx/dy are central differences of the surface position. At image borders
// the missing neighbor falls back to the center position (one-sided difference).
__global__ void sampleScreenSpaceNormals_kernel(
    const unsigned int numPixels, const int width, const int height,
    const int2* texcoords, const float3 cameraPos, bool mortonCurve, float3* pixelNormals)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    const unsigned int id3 = id * 3;
    // Get surface position
    float3 pos;
    pos.x = tex1Dfetch(s_positionTexture, id3);
    pos.y = tex1Dfetch(s_positionTexture, id3 + 1);
    pos.z = tex1Dfetch(s_positionTexture, id3 + 2);
    if (pos.x == FLOAT_MAX)
        return;
    // Get pixel coordinates
    const int2& texcoord = texcoords[id];
    const int x = texcoord.x;
    const int y = texcoord.y;
    unsigned int neighborId;
    float3 neighborPos1, neighborPos2;
    // Right pixel (NOTE(review): offset (-1, 0) is the x-1 / left neighbor)
    if (x != 0) {
        neighborId = 3 * getNeighborIndex(id, -1, 0, width, mortonCurve);
        neighborPos1.x = tex1Dfetch(s_positionTexture, neighborId);
        neighborPos1.y = tex1Dfetch(s_positionTexture, neighborId + 1);
        neighborPos1.z = tex1Dfetch(s_positionTexture, neighborId
+ 2);
    } else {
        neighborPos1 = pos;
    }
    // Left pixel (NOTE(review): offset (+1, 0) is the x+1 / right neighbor)
    if (x + 1 != width) {
        neighborId = 3 * getNeighborIndex(id, 1, 0, width, mortonCurve);
        neighborPos2.x = tex1Dfetch(s_positionTexture, neighborId);
        neighborPos2.y = tex1Dfetch(s_positionTexture, neighborId + 1);
        neighborPos2.z = tex1Dfetch(s_positionTexture, neighborId + 2);
    } else {
        neighborPos2 = pos;
    }
    const float3 dx = neighborPos2 - neighborPos1;
    // Up pixel
    if (y != 0) {
        neighborId = 3 * getNeighborIndex(id, 0, -1, width, mortonCurve);
        neighborPos1.x = tex1Dfetch(s_positionTexture, neighborId);
        neighborPos1.y = tex1Dfetch(s_positionTexture, neighborId + 1);
        neighborPos1.z = tex1Dfetch(s_positionTexture, neighborId + 2);
    } else {
        neighborPos1 = pos;
    }
    // Down pixel
    if (y + 1 != height) {
        neighborId = 3 * getNeighborIndex(id, 0, 1, width, mortonCurve);
        neighborPos2.x = tex1Dfetch(s_positionTexture, neighborId);
        neighborPos2.y = tex1Dfetch(s_positionTexture, neighborId + 1);
        neighborPos2.z = tex1Dfetch(s_positionTexture, neighborId + 2);
    } else {
        neighborPos2 = pos;
    }
    const float3 dy = neighborPos2 - neighborPos1;
    pixelNormals[id] = normalize(cross(dx, dy));
}

// Apply SSAO effect: darken a pixel in proportion to how many neighbors in a
// radius-3 disc are closer to the camera (occluders). Reads s_depthTexture.
__global__ void applySSAO_kernel(
    const unsigned int numPixels, const int width, const int height,
    const int2* texcoords, bool mortonCurve, float3* pixelColors)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    const int2& cord = texcoords[id];
    const int x = cord.x;
    const int y = cord.y;
    float depth = tex1Dfetch(s_depthTexture, id);
    unsigned int shieldCount = 1;
    unsigned int neighborId;
    float neighborDepth;
    for (int i = -3; i <= 3; ++i) {
        // Skip offsets that would leave the image horizontally
        if ((x == 0 && i < 0) || (x + 1 == width && i > 0)) {
            continue;
        }
        for (int j = -3; j <= 3; ++j) {
            if (i == 0 && j == 0) {
                continue;
            }
            // Restrict the square window to a disc of radius 3
            if (i * i + j * j > 9) {
                continue;
            }
            if ((y == 0 && j < 0) || (y + 1 == height && j > 0)) {
                continue;
            }
            neighborId = getNeighborIndex(id, i, j, width, mortonCurve);
            neighborDepth = tex1Dfetch(s_depthTexture, neighborId);
            if (depth >
neighborDepth) {
                ++shieldCount;
            }
        }
    }
    // More occluding neighbors -> smaller shield factor -> darker pixel
    float shield = 1.f / (float)shieldCount * 9.f + 0.3f;
    // Clamp shield: fully unoccluded pixels keep their original color
    if (shield > 1.0f) {
        return;
    }
    // Modify the color
    pixelColors[id].x *= shield;
    pixelColors[id].y *= shield;
    pixelColors[id].z *= shield;
}

// Cast shadow rays from each surface point toward every light; if a ray
// re-enters the Mandelbulb before leaving the bounding sphere, the pixel is
// shadowed and its color is halved.
// NOTE(review): the inner `if (id < numPixels)` is always true after the early
// return above — redundant but harmless.
template <int N>
__global__ void castShadow(
    const unsigned int numPixels, const unsigned int minimumIterations,
    const float3* pixelPositions, const float* pixelDepths,
    const float3* lightPositions, const unsigned int numLights, float3* pixelColors)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    if (id < numPixels) {
        const float3& pixelPos = pixelPositions[id];
        const float depth = pixelDepths[id];
        // Step size proportional to the surface depth
        float deltaDepth = 0.0005f * depth;
        bool lit = false;
        for (int l = 0; l < numLights; ++l) {
            const float3 dir = lightPositions[l] - pixelPos;
            float3 pos = pixelPos + dir * deltaDepth;
            float count = 0;
            int i;
            // March at most 25 steps toward the light with geometrically growing steps
            for (i = 0; i < 25; i++) {
                // Escaped the bounding sphere without hitting the set -> lit
                if (dot(pos, pos) > s_sphereRadiusSquared) {
                    lit = true;
                    break;
                }
                const int currentIteration = getNumIterations(depth, 0, minimumIterations);
                int iterationLeft;
                float residual, potential;
                // Hit the Mandelbulb again -> this light is blocked
                if (evalMandelbulb<N>(pos, currentIteration, iterationLeft, potential, residual)) {
                    break;
                }
                pos = pos + dir * deltaDepth;
                count += 0.0002f;
                deltaDepth *= pow(1000.0f, count);
            }
            // Ran out of steps without hitting anything: treat as lit
            if (i == 25) {
                lit = true;
                break;
            }
        }
        if (!lit) {
            pixelColors[id] *= 0.5f;
        }
    }
}

// Calculate color of pixels based on position, normal and lights
// (ambient + diffuse + specular; optional "colorful" mode tints channels by
// the sign/magnitude of dot(normal, pos)). Misses are colored black.
__global__ void setColorFromPos(
    const unsigned int numPixels, const float3* pixelPositions, const float* pixelDepths,
    const float3* pixelNormals, const float3 cameraPosition, const float3* lightPositions,
    const unsigned int numLights, bool useNormal, bool colorfulMode, float3* pixelColors)
{
    const unsigned int id = getPixelIndex();
    if (id >= numPixels) {
        return;
    }
    float3 col;
    const float3& pos = pixelPositions[id];
    if (pixelDepths[id] != FLOAT_MAX) {
        // Base color derived from the hit position
        const float gain = 0.2f;
        const float ampl = 1.2f;
        col = (pos + ampl) * gain;
        if (useNormal) {
            const float3& normal =
pixelNormals[id];
            const float div = dot(normal, pos);
            const float divSquared = div * div;
            float diffuse = 0;
            float specular = 0;
            for (int i = 0; i < numLights; ++i) {
                const float3& lightPosition = lightPositions[i];
                diffuse += 0.5f * max(dot(normal, normalize(lightPosition - pos)), 0.0f);
                specular += pow(max(dot(normal, normalize(cameraPosition - lightPosition)), 0.0f), 8.0f);
            }
            diffuse *= 2.f;
            specular *= 0.5f;
            const float ambient = 1.0f;
            const float ambientAndDiffuse = ambient + diffuse;
            if (colorfulMode) {
                float col1 = 1.0f, col2 = 1.0f;
                if (div > 0.0f) {
                    col2 += divSquared;
                } else {
                    col1 += divSquared;
                }
                const float col3 = 0.003f / (divSquared * divSquared + 0.01f);
                col.x = col.x * ambientAndDiffuse * col1 + specular;
                col.y = col.y * ambientAndDiffuse + specular + col3;
                col.z = col.z * ambientAndDiffuse * col2 + specular;
            } else {
                col.x = col.x * ambientAndDiffuse + specular;
                col.y = col.y * ambientAndDiffuse + specular;
                col.z = col.z * ambientAndDiffuse + specular;
            }
        }
        // Visualize normal as color
        //if (useNormal)
        //{
        //    const float3& normal = pixelNormals[id];
        //    col = normal;
        //}
    } else {
        col.x = 0.0f;
        col.y = 0.0f;
        col.z = 0.0f;
    }
    pixelColors[id] = col;
}

// Convert floating point [0:1] color into 256 color (clamped, optionally
// de-interleaved from Morton order into row-major output).
// NOTE(review): the `1.0` clamp literals are double constants in an otherwise
// float expression — harmless here but inconsistent with the surrounding `.f` style.
__global__ void colFloatToByte(
    const unsigned int numPixels, const float3* pixelColorsFloat, int width,
    bool mortonCurve, unsigned char* pixelColorsUChar)
{
    const unsigned int id = getPixelIndex();
    if (id < numPixels) {
        const float3 col = pixelColorsFloat[id];
        const float r = col.x > 1.f ? 1.0 : (col.x < 0.f ? 0.f : col.x);
        const float g = col.y > 1.f ? 1.0 : (col.y < 0.f ? 0.f : col.y);
        const float b = col.z > 1.f ? 1.0 : (col.z < 0.f ?
0.f : col.z); unsigned int outId; if (mortonCurve) { unsigned short x, y; decodeMortonCurve(id, x, y); outId = x + width * y; } else { outId = id; } unsigned char* colUChar = &pixelColorsUChar[outId * 3]; colUChar[0] = (unsigned char)(255 * r); colUChar[1] = (unsigned char)(255 * g); colUChar[2] = (unsigned char)(255 * b); } } #ifdef ENABLE_DOUBLE_PRECISION #include "mandelbulbRendererDouble.cu" #endif } // namespace MandelbulbCudaKernel // Costructor MandelbulbRenderer::MandelbulbRenderer(unsigned int width, unsigned int height) : m_width(width) , m_height(height) , m_numPixels(width * height) { cudaSetDevice(0); cudaThreadSynchronize(); computeGridSize(m_numPixels, 256, m_numBlocks, m_numThreads); computeGridSize(m_numPixels / 2, 256, m_numBlocksForLow, m_numThreadsForLow); allocateMemory(); MandelbulbCudaKernel::setTexcoords<<<m_numBlocks, m_numThreads>>>( m_numPixels, (int)m_width, (int)m_height, m_mortonCurve, m_texcoords); } // Destructor MandelbulbRenderer::~MandelbulbRenderer() { freeMemory(); freeAdaptiveIterationsMemory(); freeUpsamplingMemory(); } // Update camera information void MandelbulbRenderer::updateCamera(const float3& pos, const float3& forward, const float3& up, const float3& side) { m_cameraPosition = pos; m_cameraForward = forward; m_cameraUp = up; m_cameraSide = side; setDirty(); } // Set pixel angle void MandelbulbRenderer::setPixelAngle(float angle) { if (angle > 0) { m_pixelAngle = angle; } } // Calculate mandelbulb and set colors to pixel buffer void MandelbulbRenderer::createMandelbulb(unsigned char* pixcelColorsHost) { using namespace MandelbulbCudaKernel; clock_t timer; if (m_profile) { timer = clock(); } // Set initial iteration count { startTimer("Initialization"); // Make sure that iteration counts is bigger than minimum counts if (m_initialIterations < m_minimumIterations) { m_initialIterations = m_minimumIterations; } // Run mandelbulb at the camera position on device once and determine the initial iteration counts int 
iterationLeft; evalMandelbulbOnHost<8>(m_cameraPosition, s_numIterationsOnHost, iterationLeft); if (iterationLeft > 0) { int numIteration = s_numIterationsOnHost - iterationLeft + 1; if (numIteration > m_initialIterations) { m_initialIterations = numIteration; } } endTimer(); } { startTimer("Set Pixel Direction"); // Set direction of rays setPixelDirection <<<m_numBlocks, m_numThreads>>> ( m_numPixels, (int)m_width / 2, (int)m_height / 2, m_texcoords, m_cameraForward, m_cameraUp, m_cameraSide, m_pixelAngle, m_pixelDirs); if (m_adaptiveIterationCount && m_upsampling) { setPixelDirectionLow <<<m_numBlocksForLow, m_numBlocksForLow>>> ( m_numPixels / 2, (int)m_width / 2, (int)m_height / 2, m_texcoordsLow, m_cameraForward, m_cameraUp, m_cameraSide, m_pixelAngle, m_pixelDirsLow); } cudaThreadSynchronize(); endTimer(); } { startTimer("Ray March"); #ifdef ENABLE_DOUBLE_PRECISION // Perform mandelbulb if (m_doublePrecision && !m_upsampling) { raymarchMandelbulbD_kernel<8> <<<m_numBlocks, m_numThreads>>> ( m_numPixels, m_pixelDirs, m_cameraPosition, m_initialDeltaStep, m_iterationAccelerator, m_initialIterations, false, m_pixelIterations, m_pixelDepths, m_pixelDeltaDepths); } else #endif { int numPixels = m_upsampling ? m_numPixels / 2 : m_numPixels; int numBlocks = m_upsampling ? m_numBlocksForLow : m_numBlocks; int numThreads = m_upsampling ? m_numThreadsForLow : m_numThreads; float3* dirs = m_upsampling ? m_pixelDirsLow : m_pixelDirs; int* iterations = m_upsampling ? m_pixelIterationsLow : m_pixelIterations; float* depths = m_upsampling ? m_pixelDepthsLow : m_pixelDepths; float* deltaDepths = m_upsampling ? 
m_pixelDeltaDepthsLow : m_pixelDeltaDepths;
            raymarchMandelbulb_kernel<8> <<<numBlocks, numThreads>>> (
                numPixels, dirs, m_cameraPosition, m_initialDeltaStep,
                m_iterationAccelerator, m_initialIterations, m_distanceEstimation,
                iterations, m_numRayMarchSteps, depths, deltaDepths);
        }
        cudaThreadSynchronize();
        endTimer();
    }
    if (m_adaptiveIterationCount && !m_doublePrecision) {
        startTimer("Adaptive Iteration");
        // Drill down the surface by adaptive iteration counts
        // (select full- or half-resolution buffers depending on upsampling)
        int numPixels = m_upsampling ? m_numPixels / 2 : m_numPixels;
        int numBlocks = m_upsampling ? m_numBlocksForLow : m_numBlocks;
        int numThreads = m_upsampling ? m_numThreadsForLow : m_numThreads;
        int width = m_upsampling ? (int)m_width / 2 : (int)m_width;
        int height = m_upsampling ? (int)m_height / 2 : (int)m_height;
        int2* texcoords = m_upsampling ? m_texcoordsLow : m_texcoords;
        float3* dirs = m_upsampling ? m_pixelDirsLow : m_pixelDirs;
        float* depths = m_upsampling ? m_pixelDepthsLow : m_pixelDepths;
        float* deltaDepths = m_upsampling ? m_pixelDeltaDepthsLow : m_pixelDeltaDepths;
        float* depthDiffs = m_upsampling ? m_pixelDepthDiffsLow : m_pixelDepthDiffs;
        float* depthTmp = m_upsampling ? m_pixelDepthsTmpLow : m_pixelDepthsTmp;
        int* iterations = m_upsampling ?
m_pixelIterationsLow : m_pixelIterations;
        // Bind the working buffers as textures for the drilling kernels
        cudaBindTexture(0, s_depthDiffTexture, depthDiffs, numPixels * sizeof(float));
        cudaBindTexture(0, s_depthTexture, depths, numPixels * sizeof(float));
        cudaBindTexture(0, s_iterationTexture, iterations, numPixels * sizeof(int));
        for (int i = 0; i < m_numDrillingIterations; ++i) {
            adaptiveIteration_kernel<8><<<numBlocks, numThreads >>>(
                numPixels, dirs, m_cameraPosition, m_minimumIterations,
                m_iterationToDeltaStepRate, m_distanceEstimation,
                iterations, depths, depthTmp, deltaDepths, depthDiffs
            );
            compareDepthDiffs_kernel<<<numBlocks, numThreads >>>(
                numPixels, width, height, m_laplacianThreshold, depthDiffs, depthTmp,
                texcoords, m_iterationToDeltaStepRate, m_mortonCurve, m_upsampling,
                iterations, depths, deltaDepths
            );
            cudaThreadSynchronize();
        }
        endTimer();
        {
            startTimer("Finalize Adaptive Iteration");
            finalizeIteration_kernel <<<numBlocks, numThreads>>> (
                numPixels, iterations
            );
            cudaThreadSynchronize();
            endTimer();
        }
        if (m_upsampling) {
            startTimer("Upsample");
            // Scatter the half-resolution results into the full-resolution buffers
            copyFromLowToHigh<<<m_numBlocksForLow, m_numThreadsForLow>>>(
                numPixels, (int)m_width, m_mortonCurve, m_texcoordsLow,
                m_pixelDepthsLow, m_pixelDeltaDepthsLow, m_pixelIterationsLow,
                m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations
            );
            cudaBindTexture(0, s_deltaDepthTexture, deltaDepths, numPixels * sizeof(float));
            cudaThreadSynchronize();
            upsample<8><<<m_numBlocksForLow, m_numThreadsForLow>>>(
                numPixels, m_cameraPosition, m_texcoordsLow, (int)m_width, (int)m_height,
                m_minimumIterations, m_iterationAccelerator, m_mortonCurve,
                m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations
            );
            upsamplePhase1<8><<<m_numBlocksForLow, m_numThreadsForLow>>>(
                numPixels, m_cameraPosition, (int)m_width / 2, (int)m_height / 2, (int)m_width,
                m_minimumIterations, m_iterationAccelerator, m_mortonCurve, m_texcoordsLow,
                m_pixelDepthsLow, m_pixelDeltaDepthsLow, m_pixelIterationsLow,
                m_pixelDirs, m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations
            );
            cudaThreadSynchronize();
cudaUnbindTexture(s_depthTexture);
            cudaUnbindTexture(s_deltaDepthTexture);
            cudaUnbindTexture(s_iterationTexture);
            // Re-bind the textures to the full-resolution buffers for phases 2/3
            cudaBindTexture(0, s_depthTexture, m_pixelDepths, m_numPixels * sizeof(float));
            cudaBindTexture(0, s_deltaDepthTexture, m_pixelDeltaDepths, m_numPixels * sizeof(float));
            cudaBindTexture(0, s_iterationTexture, m_pixelIterations, m_numPixels * sizeof(int));
            upsamplePhase2<8><<<m_numBlocksForLow, m_numThreadsForLow>>>(
                numPixels, (int)m_width, (int)m_height, m_minimumIterations,
                m_iterationAccelerator, m_mortonCurve, m_texcoordsLow, m_pixelDirs,
                m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations);
            upsamplePhase3<8><<<m_numBlocksForLow, m_numThreadsForLow>>>(
                numPixels, (int)m_width, (int)m_height, m_minimumIterations,
                m_iterationAccelerator, m_mortonCurve, m_texcoordsLow, m_pixelDirs,
                m_pixelDepths, m_pixelDeltaDepths, m_pixelIterations);
            cudaUnbindTexture(s_depthTexture);
            cudaUnbindTexture(s_deltaDepthTexture);
            cudaUnbindTexture(s_iterationTexture);
            endTimer();
        } else {
            cudaUnbindTexture(s_iterationTexture);
            cudaUnbindTexture(s_depthTexture);
        }
        cudaUnbindTexture(s_depthDiffTexture);
    }
    {
        startTimer("Bisection Search");
        // Perform bisection search to determine the final position of the surface
#ifdef ENABLE_DOUBLE_PRECISION
        if (m_doublePrecision && !m_upsampling) {
            binaryPartitionSearchD<8> << <m_numBlocks, m_numThreads >> > (
                m_numPixels, m_pixelDirs, m_pixelDeltaDepths, m_cameraPosition,
                m_pixelIterations, m_pixelPositions, m_pixelDepths);
        } else
#endif
        {
            binaryPartitionSearch<8> << <m_numBlocks, m_numThreads >> > (
                m_numPixels, m_pixelDirs, m_pixelDeltaDepths, m_cameraPosition,
                m_pixelIterations, m_pixelPositions, m_pixelDepths);
        }
        cudaThreadSynchronize();
        endTimer();
    }
    {
        startTimer("Update Parameters");
        // Update initial iteration counts of the next step by taking minimum iterations of this step.
        // BUGFIX: this used to declare a LOCAL `int m_initialIterations = INT_MAX;`
        // that shadowed the member, so the computed minimum was discarded and the
        // member was left at 0 every frame. Compute the minimum into the member itself.
        cpyDeviceToHost((void*)m_pixelIterationsHost, (void*)(m_pixelIterations), m_numPixels * sizeof(int));
        m_initialIterations =
INT_MAX;
        for (int i = 0; i < (int)m_numPixels; ++i) {
            if (m_pixelIterationsHost[i] > 0 && m_initialIterations > m_pixelIterationsHost[i]) {
                m_initialIterations = m_pixelIterationsHost[i];
            }
        }
        // No pixel reported a positive iteration count: fall back to 0 so the
        // next frame re-initializes from m_minimumIterations (previous behavior).
        if (m_initialIterations == INT_MAX) {
            m_initialIterations = 0;
        }
        // Update focal depth and initial delta step of the next step based on the result of this step
        if (m_needRecalculate) {
            float centerDepth;
            int offset;
            if (m_mortonCurve) {
                offset = (int)encodeMortonCurveHost(m_width / 2, m_height / 2);
            } else {
                offset = m_numPixels / 2 - m_width / 2;
            }
            cpyDeviceToHost((void*)&centerDepth, (void*)(m_pixelDepths + offset), sizeof(float));
            if (centerDepth > s_minDepth && centerDepth < s_maxDepth) {
                m_focalDepth = centerDepth;
                m_initialDeltaStep = centerDepth * m_depthToDeltaStepRate;
            }
            m_needRecalculate = false;
        }
        endTimer();
    }
    // Calculate normal
    if (m_normalMode != NoNormal) {
        startTimer("Calculate Normal");
        cudaBindTexture(0, s_positionTexture, m_pixelPositions, m_numPixels * sizeof(float3));
        switch (m_normalMode) {
        case PseudoScreenSpace:
            samplePseudoScreenSpaceNormals_kernel<<<m_numBlocks, m_numThreads>>>(
                m_numPixels, (int)m_width, (int)m_height, m_texcoords,
                m_cameraPosition, m_mortonCurve, m_pixelNormals);
            break;
        case ScreenSpace:
            sampleScreenSpaceNormals_kernel<<<m_numBlocks, m_numThreads>>>(
                m_numPixels, (int)m_width, (int)m_height, m_texcoords,
                m_cameraPosition, m_mortonCurve, m_pixelNormals);
            break;
        }
        cudaUnbindTexture(s_positionTexture);
        cudaThreadSynchronize();
        endTimer();
    }
    const unsigned int numLights = (unsigned int)m_lightPositionsHost.size();
    {
        startTimer("Set Color");
        // Lazily upload the light positions the first time they are needed
        if (!m_lightPositions && numLights > 0) {
            allocateArray((void**)&m_lightPositions, numLights * sizeof(float3));
            cpyHostToDevice((void*)m_lightPositions, (void*)(&m_lightPositionsHost.front()), numLights * sizeof(float3));
        }
        // Set colors
        setColorFromPos << <m_numBlocks, m_numThreads >> > (
            m_numPixels, m_pixelPositions, m_pixelDepths, m_pixelNormals,
            m_cameraPosition, m_lightPositions, numLights,
            m_normalMode != NoNormal, m_coloringMode == Colorful,
m_pixelColorsFloat); cudaThreadSynchronize(); endTimer(); } // Cast shadow if(m_castShadow) { startTimer("Cast Shadow"); castShadow<8> << <m_numBlocks, m_numThreads >> >( m_numPixels, m_minimumIterations, m_pixelPositions, m_pixelDepths, m_lightPositions, numLights, m_pixelColorsFloat); cudaThreadSynchronize(); endTimer(); } // Apply SSAO if(m_ssaoEnabled) { startTimer("SSAO"); cudaBindTexture(0, s_depthTexture, m_pixelDepths, m_numPixels * sizeof(float)); applySSAO_kernel<<<m_numBlocks, m_numThreads >>>( m_numPixels, (int)m_width, (int)m_height, m_texcoords, m_mortonCurve, m_pixelColorsFloat ); cudaUnbindTexture(s_depthTexture); cudaThreadSynchronize(); endTimer(); } // Convert color from float to byte { startTimer("Prepare Result"); colFloatToByte<<<m_numBlocks, m_numThreads>>>(m_numPixels, m_pixelColorsFloat, (int)m_width, m_mortonCurve, pixcelColorsHost); cudaThreadSynchronize(); endTimer(); } if (m_profile) { printf("--------------------\n"); clock_t endTimer = clock(); printMilliSeconds(timer, endTimer, "Total"); printf("--------------------\n"); m_profile = false; } } void MandelbulbRenderer::setMinimumIterations(int minItr) { m_minimumIterations = minItr; m_initialIterations = 0; } void MandelbulbRenderer::setIterationAccelerator(float factor) { m_iterationAccelerator = factor; m_initialIterations = 0; } void MandelbulbRenderer::enableAdaptiveIterations(bool enable) { m_adaptiveIterationCount = enable; if (m_adaptiveIterationCount) { m_minimumIterations = 3; allocateAdaptiveIterationsMemory(); } else { freeAdaptiveIterationsMemory(); } m_initialIterations = 0; } void MandelbulbRenderer::enableDistanceEstimation(bool enable) { m_distanceEstimation = enable; } void MandelbulbRenderer::enableUpsampling(bool enable) { m_upsampling = enable; if (m_upsampling) { allocateUpsamplingMemory(); } else { freeUpsamplingMemory(); } } void MandelbulbRenderer::enableMortonCurveIndexing(bool enable) { m_mortonCurve = enable; // Set up pixel coordinates 
MandelbulbCudaKernel::setTexcoords<<<m_numBlocks, m_numThreads>>>( m_numPixels, (int)m_width, (int)m_height, m_mortonCurve, m_texcoords); } void MandelbulbRenderer::addLight(const float3& pos) { m_lightPositionsHost.push_back(pos); if (m_lightPositions) { freeArray((void**)m_lightPositions); } } void MandelbulbRenderer::setDepthToDeltaStepRate(float rate) { m_depthToDeltaStepRate = rate; m_initialDeltaStep = m_focalDepth * m_depthToDeltaStepRate; } // Allocate buffers void MandelbulbRenderer::allocateMemory() { const int pixelsTimeFloat3 = m_numPixels * sizeof(float3); const int pixelsTimeFloat = m_numPixels * sizeof(float); allocateArray((void**)&m_pixelColorsFloat, pixelsTimeFloat3); allocateArray((void**)&m_pixelDirs, pixelsTimeFloat3); allocateArray((void**)&m_pixelNormals, pixelsTimeFloat3); allocateArray((void**)&m_pixelPositions, pixelsTimeFloat3); allocateArray((void**)&m_texcoords, m_numPixels * sizeof(int2)); allocateArray((void**)&m_pixelDepths, pixelsTimeFloat); allocateArray((void**)&m_pixelDeltaDepths, pixelsTimeFloat); allocateArray((void**)&m_pixelIterations, m_numPixels * sizeof(int)); allocateArray((void**)&m_numRayMarchSteps, m_numPixels * sizeof(int)); m_pixelIterationsHost = new int[m_numPixels]; if (m_adaptiveIterationCount) { allocateAdaptiveIterationsMemory(); if (m_upsampling) { allocateUpsamplingMemory(); } } } void MandelbulbRenderer::allocateAdaptiveIterationsMemory() { if (m_pixelDepthDiffs) { // Already allocated return; } const int pixelsTimeFloat = m_numPixels * sizeof(float); allocateArray((void**)&m_pixelDepthDiffs, pixelsTimeFloat); allocateArray((void**)&m_pixelDepthsTmp, pixelsTimeFloat); } void MandelbulbRenderer::allocateUpsamplingMemory() { if (m_pixelDirsLow) { // Already allocated return; } freeAdaptiveIterationsMemory(); int numPixels = m_numPixels / 2; const int pixelsTimeFloat3 = numPixels * sizeof(float3); const int pixelsTimeFloat = numPixels * sizeof(float); allocateArray((void**)&m_pixelDirsLow, pixelsTimeFloat3); 
allocateArray((void**)&m_pixelDepthsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelDeltaDepthsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelIterationsLow, numPixels * sizeof(int)); allocateArray((void**)&m_pixelDepthDiffsLow, pixelsTimeFloat); allocateArray((void**)&m_pixelDepthsTmpLow, pixelsTimeFloat); allocateArray((void**)&m_texcoordsLow, numPixels * sizeof(int2)); MandelbulbCudaKernel::setTexcoords<<<m_numBlocksForLow, m_numThreadsForLow>>>( numPixels, (int)m_width / 2, (int)m_height, m_mortonCurve, m_texcoordsLow); } void MandelbulbRenderer::freeMemory() { freeArray((void**)&m_pixelColorsFloat); freeArray((void**)&m_pixelDirs); freeArray((void**)&m_pixelNormals); freeArray((void**)&m_pixelPositions); freeArray((void**)&m_texcoords); freeArray((void**)&m_pixelDepths); freeArray((void**)&m_pixelDeltaDepths); freeArray((void**)&m_pixelIterations); freeArray((void**)&m_numRayMarchSteps); delete[] m_pixelIterationsHost; m_pixelIterationsHost = nullptr; } void MandelbulbRenderer::freeAdaptiveIterationsMemory() { if (m_pixelDepthDiffs) { freeArray((void**)&m_pixelDepthDiffs); freeArray((void**)&m_pixelDepthsTmp); } } void MandelbulbRenderer::freeUpsamplingMemory() { if (m_pixelDirsLow) { freeArray((void**)&m_pixelDirsLow); freeArray((void**)&m_pixelDepthsLow); freeArray((void**)&m_pixelDeltaDepthsLow); freeArray((void**)&m_pixelDepthDiffsLow); freeArray((void**)&m_pixelDepthsTmpLow); freeArray((void**)&m_texcoordsLow); } } void MandelbulbRenderer::startTimer(const char* timerName) { if (m_profile) { m_startTimer = clock(); m_timerName = timerName; } } void MandelbulbRenderer::endTimer() const { if (m_profile) { clock_t endTimer = clock(); printMilliSeconds(m_startTimer, endTimer, m_timerName); } } void MandelbulbRenderer::printMilliSeconds(const clock_t& c0, const clock_t& c1, const char* name) const { const clock_t deltaClock = c1 - c0; float msec = deltaClock * 1000.f / (float)CLOCKS_PER_SEC; printf("%s : %f msec \n", name, msec); }
53575a489dc59d1e43bb7222eb1f72b488e3d381.hip
// !!! This is a file automatically generated by hipify!!!
#include "matmul_kernel.h"
#include "openblas/cblas.h"
#include "prof.h"
#include <assert.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <iostream>
#include <math.h>
#include <omp.h>
#include <stdio.h>

using namespace std;

// WMMA tile dimensions used by the tensor-core kernel (case 't')
#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16

__global__ void cuda_kernel_sgemm_100_tex(
    float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta);

// typedef texture<float, hipTextureType1D, hipReadModeElementType> floatTex;
// texture<float, hipTextureType1D, hipReadModeElementType> tex1DRefA(0, hipFilterModePoint, hipAddressModeBorder);
// texture<float, hipTextureType1D, hipReadModeElementType> tex1DRefB(0, hipFilterModePoint, hipAddressModeBorder);
#define USE_TEXTURE 0

// Benchmark driver: runs one of several SGEMM implementations (selected by
// kernel_type) on the GPU, times it over repeated launches, prints GFLOPS,
// and copies the result back into c.
// half_input: A/B/C are treated as half-precision device inputs.
// trans_a: additionally upload a transposed copy of `a` (needed by case 100).
void gpu_sgemm(float* a, float* b, float* c, size_t N, size_t M, size_t K,
               float alpha, float beta, int kernel_type, bool half_input, bool trans_a)
{
    float* dev_a = NULL;
    float* dev_at = NULL;
    float* dev_b = NULL;
    float* dev_c = NULL;
    half* A = NULL;
    half* B = NULL;
    float* C = NULL;
    float* D = NULL;
    float* at = (float*)malloc(M * K * sizeof(float));
    half* ch = (half*)malloc(M * N * sizeof(half));
    float flop = 2 * (float)M * (float)N * (float)K;
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    int lda = K;
    int ldb = N;
    int ldc = N;
    // int lda = M;
    // int ldb = K;
    // int ldc = M;
    if (half_input) {
        checkCudaErrors(hipMalloc((void**)&A, sizeof(half) * M * K));
        checkCudaErrors(hipMalloc((void**)&B, sizeof(half) * N * K));
        checkCudaErrors(hipMalloc((void**)&C, sizeof(float) * M * N));
        checkCudaErrors(hipMalloc((void**)&D, sizeof(float) * M * N));
        checkCudaErrors(hipMalloc((void**)&dev_c, M * N * sizeof(float)));
        // WMMA requires 128-byte-aligned operands
        assert(((unsigned long long)A) % 128 == 0);
        assert(((unsigned long long)B) % 128 == 0);
        assert(((unsigned long long)C) % 128 == 0);
        assert(((unsigned long long)D) % 128 == 0);
        // NOTE(review): `a`/`b` are declared float* but are copied as raw half
        // bytes here (unlike `c`, which is converted via __float2half below) —
        // presumably the caller packs half data into these buffers in
        // half_input mode; verify against the call site.
        checkCudaErrors(hipMemcpy(A, a, sizeof(half) * M * K,
hipMemcpyHostToDevice));
        checkCudaErrors(hipMemcpy(B, b, sizeof(half) * N * K, hipMemcpyHostToDevice));
        for (int i = 0; i < M * N; ++i) {
            ch[i] = __float2half(c[i]);
        }
        checkCudaErrors(hipMemcpy(C, ch, sizeof(half) * M * N, hipMemcpyHostToDevice));
        checkCudaErrors(hipMemset(D, 0, sizeof(float) * M * N));
        hipMemcpy(dev_c, c, M * N * sizeof(float), hipMemcpyHostToDevice);
    } else {
        hipMalloc((void**)&dev_a, M * K * sizeof(float));
        hipMalloc((void**)&dev_b, K * N * sizeof(float));
        hipMalloc((void**)&dev_c, M * N * sizeof(float));
        hipMemcpy(dev_a, a, M * K * sizeof(float), hipMemcpyHostToDevice);
        hipMemcpy(dev_b, b, K * N * sizeof(float), hipMemcpyHostToDevice);
        hipMemcpy(dev_c, c, M * N * sizeof(float), hipMemcpyHostToDevice);
        if (trans_a) {
            // Build and upload the column-major (transposed) copy of `a`
            for (int i = 0; i < M; ++i) {
                for (int j = 0; j < K; ++j) {
                    at[j * M + i] = a[i * K + j];
                }
            }
            hipMalloc((void**)&dev_at, M * K * sizeof(float));
            hipMemcpy(dev_at, at, M * K * sizeof(float), hipMemcpyHostToDevice);
        }
    }
    // hipBindTexture(0, tex1DRefA, dev_at, M * K * sizeof(float));
    // hipBindTexture(0, tex1DRefB, dev_b, K * N * sizeof(float));
    int cycle_count = 1000;
    hipError_t result;
    hipEvent_t start;
    hipEvent_t stop;
    float msecTotal;
    hipEventCreate(&start);
    hipEventRecord(start, NULL);
    switch (kernel_type) {
    case 0: {
        // Naive kernel, one 32x32 output tile per block, single launch
        int grid_r = M / 32;
        int grid_c = N / 32;
        if (M % 32 != 0)
            grid_r += 1;
        if (N % 32 != 0)
            grid_c += 1;
        dim3 grid_d(grid_r, grid_c, 1);
        dim3 block_d(32, 32, 1);
        hipLaunchKernelGGL(( cuda_kernel_sgemm_0), dim3(grid_d), dim3(block_d), 0, 0, dev_a, dev_b, dev_c, N, M, K, alpha, beta);
        break;
    }
    case 1: {
        int grid_r = M / 32;
        int grid_c = N / 32;
        if (M % 32 != 0)
            grid_r += 1;
        if (N % 32 != 0)
            grid_c += 1;
        dim3 grid_d(grid_r, grid_c, 1);
        dim3 block_d(32, 32, 1);
        hipLaunchKernelGGL(( cuda_kernel_sgemm_1), dim3(grid_d), dim3(block_d), 0, 0, dev_a, dev_b, dev_c, N, M, K, alpha, beta);
        break;
    }
    case 2: {
        int grid_r = M / 32;
        int grid_c = N / 32;
        if (M % 32 != 0)
            grid_r += 1;
        if (N % 32 != 0)
            grid_c += 1;
        dim3 grid_d(grid_r,
grid_c, 1);
        dim3 block_d(32, 32, 1);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( cuda_kernel_sgemm_2), dim3(grid_d), dim3(block_d), 0, 0, dev_a, dev_b, dev_c, N, M, K, alpha, beta);
        }
        break;
    }
    case 20: {
        // 64x64 output tile per 32x32 block
        int grid_r = M / 64;
        int grid_c = N / 64;
        if (M % 64 != 0)
            grid_r += 1;
        if (N % 64 != 0)
            grid_c += 1;
        dim3 grid_d(grid_r, grid_c, 1);
        dim3 block_d(32, 32, 1);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( cuda_kernel_sgemm_2_64x64), dim3(grid_d), dim3(block_d), 0, 0, dev_a, dev_b, dev_c, N, M, K, alpha, beta);
        }
        break;
    }
    case 'b': {
        // hipBLAS reference path (column-major: swap A/B to compute row-major C)
        assert(!half_input);
        for (int n = 0; n < cycle_count; ++n) {
            hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, dev_b, N, dev_a, K, &beta, dev_c, N);
        }
        break;
    }
    case 21: {
        // hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
        hipblasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
        if (half_input) {
            for (int n = 0; n < cycle_count; ++n) {
                hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, HIP_R_16F, N, A, HIP_R_16F, K, &beta, dev_c, HIP_R_32F, N, HIP_R_32F, algo);
            }
        } else {
            for (int n = 0; n < cycle_count; ++n) {
                hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, dev_b, HIP_R_32F, N, dev_a, HIP_R_32F, K, &beta, dev_c, HIP_R_32F, N, HIP_R_32F, algo);
            }
        }
        break;
    }
    case 'c': {
        // CUTLASS path
        if (half_input) {
            for (int n = 0; n < cycle_count; ++n) {
                result = CutlassSgemmNN(M, N, K, alpha, (float*)A, lda, (float*)B, ldb, beta, dev_c, ldc, 1);
            }
        } else {
            for (int n = 0; n < cycle_count; ++n) {
                result = CutlassSgemmNN(M, N, K, alpha, dev_a, lda, dev_b, ldb, beta, dev_c, ldc, 1);
            }
        }
        // if (result == hipSuccess) {
        //     cout << "CutlassSgemmNN success" << endl;
        // }
        break;
    }
    case 'r': {
        hipError_t result;
        result = ReferenceGemm(M, N, K, alpha, dev_a, lda, dev_b, ldb, beta, dev_c, ldc);
        // if (result == hipSuccess) {
        //     cout << "ReferenceGemm success" << endl;
        // }
        break;
    }
    case 't': {
        dim3 gridDim;
        dim3 blockDim;
        // blockDim.x must be a multple of warpSize
        // 128x4 means we have 16
        // warps and a block computes a 64x64 output tile
        blockDim.x = 128;
        blockDim.y = 4;
        gridDim.x = (M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32);
        gridDim.y = (N + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y);
        assert(half_input);
        hipLaunchKernelGGL(( wmma_sgemm_kernel), dim3(gridDim), dim3(blockDim), 0, 0, A, B, C, D, M, N, K, alpha, beta);
        // BUGFIX: this case previously fell through into case 100, which
        // asserts trans_a and relaunches a different kernel over dev_c.
        break;
    }
    case 100: {
        int stride_x = 64;
        int stride_y = 64;
        int grid_x = (N + stride_x - 1) / stride_x;
        int grid_y = (M + stride_y - 1) / stride_y;
        int block_x = stride_x;
        dim3 grid_d(grid_x, grid_y, 1);
        dim3 block_d(block_x, 1, 1);
        // std::cout << grid_x << " " << grid_y << " " << block_x << std::endl;
        // This kernel consumes the transposed copy of A uploaded earlier
        assert(trans_a);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( cuda_kernel_sgemm_100), dim3(grid_d), dim3(block_d), 0, 0, dev_at, dev_b, dev_c, M, N, K, alpha, beta);
            // cuda_kernel_sgemm_100_tex<<<grid_d, block_d>>>(dev_at, dev_b, dev_c, M, N, K, alpha, beta);
            // cuda_kernel_sgemm_100_v2<<<grid_d, block_d>>>(dev_at, dev_b, dev_c, M, N, K, alpha, beta);
        }
        break;
    }
    case 101: {
        dim3 grid((N + 255) / 256, (M + 127) / 128);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( ampere_sgemm_128x256x8_kernel), dim3(grid), dim3(256), 0, 0, dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1);
        }
        break;
    }
    case 102: {
        dim3 grid((N + 255) / 256, (M + 127) / 128);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong), dim3(grid), dim3(256), 0, 0, dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1);
        }
        break;
    }
    case 103: {
        dim3 grid((N + 255) / 256, (M + 127) / 128);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( ampere_sgemm_my_opt_128x256x8_kernel_sm_pingpong), dim3(grid), dim3(256), 0, 0, dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1);
        }
        break;
    }
    case 104: {
        dim3 grid((N + 255) / 256, (M + 127) / 128);
        for (int n = 0; n < cycle_count; ++n) {
            hipLaunchKernelGGL(( ampere_sgemm_my_opt_128x256x8_kernel_sm_reg_pingpong),
dim3(grid), dim3(256), 0, 0, dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1); } break; } } hipEventCreate(&stop); hipEventRecord(stop, NULL); hipEventSynchronize(stop); hipEventElapsedTime(&msecTotal, start, stop); // hipDeviceSynchronize(); float GFLOPs = cycle_count * flop / msecTotal / 1e+6; float compute_peak_ratio = 0.0; if (half_input) { compute_peak_ratio = GFLOPs / 312000.0f * 100; } else { compute_peak_ratio = GFLOPs / 19500.0f * 100; } printf("Processing time: %f (ms), GFLOPS: %.6f, compute_peak_ratio: %.2f\%\n", msecTotal / cycle_count, GFLOPs, compute_peak_ratio); float* ct = (float*)malloc(M * N * sizeof(float)); if (kernel_type == 100) { hipMemcpy(ct, dev_c, M * N * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { c[j * N + i] = ct[i * M + j]; } } } else { hipMemcpy(c, dev_c, M * N * sizeof(float), hipMemcpyDeviceToHost); } hipblasDestroy(handle); if (half_input) { checkCudaErrors(hipFree((void*)A)); checkCudaErrors(hipFree((void*)B)); checkCudaErrors(hipFree((void*)C)); checkCudaErrors(hipFree((void*)D)); } else { checkCudaErrors(hipFree(dev_a)); checkCudaErrors(hipFree(dev_b)); if (trans_a) { checkCudaErrors(hipFree(dev_at)); } } checkCudaErrors(hipFree(dev_c)); free(at); free(ch); free(ct); } void gpu_warmup() { float* dev_p = 0; hs_timer timer; timer.tic("gpu warmup"); hipMalloc((void**)&dev_p, 16 * 32 * sizeof(float)); hipLaunchKernelGGL(( cuda_kernel_warmup), dim3(16), dim3(32), 0, 0, dev_p); hipDeviceSynchronize(); hipFree(dev_p); timer.toc("gpu warmup"); } // void cpu_kernel_sgemm_0(float *a, float *b, float *c, size_t N, size_t M, size_t K, float alpha, float beta) { // for (int m = 0; m < M; ++m) { // for (int n = 0; n < N; ++n) { // float acc = 0.0f; // for (int k = 0; k < K; ++k) { // acc += a[m * K + k] * b[k * N + n]; // } // c[m * N + n] = alpha * acc + beta * c[m * N + n]; // } // } // } void cpu_sgemm(float* a, float* b, float* c, size_t N, size_t M, size_t K, float alpha, 
float beta, int kernel_type) { hs_timer timer; timer.tic("cpu sgemm"); switch (kernel_type) { case 0: { cpu_kernel_sgemm_0(a, b, c, N, M, K, alpha, beta); break; } case 'm': { cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, alpha, a, K, b, N, beta, c, N); break; } } timer.toc("cpu sgemm"); } void cpu_warmup() { hs_timer timer; timer.tic("cpu warmup"); const size_t arr_size = 1024; float* p = new float[arr_size]; #pragma omp parallel for simd for (size_t i = 0; i < arr_size; i++) { float f = (float)i; p[i] = f * f * f; } delete p; timer.toc("cpu warmup"); } __device__ void sgemm_block_64x64_tex( float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta) { __shared__ float a_b_shm[2 * 16 * 64]; int block_x = blockIdx.x; int block_y = blockIdx.y; // float* a = pa + block_y * 64; // float* b = pb + block_x * 64; // float* c = pc + block_x * 64 * M + block_y * 64; // int tid = threadIdx.x & 0x3f; int tid = threadIdx.x; int ldx = tid >= 32 ? N : M; int tid2 = (tid >> 4) & 1; // int tid15 = tid & 0xf; int tid15 = tid & 15; int a_b_offset = tid >= 32 ? block_x * 64 : block_y * 64; int track0 = a_b_offset + tid2 * ldx + tid15 * 4; int track2 = track0 + 2 * ldx; int track4 = track0 + 4 * ldx; int track6 = track0 + 6 * ldx; int end = track0 + (K - 8) * ldx; int write_offset = tid2 * 64 + tid15 * 4; write_offset += tid >= 32 ? 512 : 0; int readAs = ((tid >> 1) & 7) << 2; int readBs = ((((tid & 0x30) >> 3) | (tid & 1)) << 2) + 512; #if USE_TEXTURE floatTex tex = tid >= 32 ? tex1DRefB : tex1DRefA; #else float* read_addr = tid >= 32 ? 
b : a; #endif float cbb00 = 0, cbb01 = 0, cbb02 = 0, cbb03 = 0; float cbb10 = 0, cbb11 = 0, cbb12 = 0, cbb13 = 0; float cbb20 = 0, cbb21 = 0, cbb22 = 0, cbb23 = 0; float cbb30 = 0, cbb31 = 0, cbb32 = 0, cbb33 = 0; float cba00 = 0, cba01 = 0, cba02 = 0, cba03 = 0; float cba10 = 0, cba11 = 0, cba12 = 0, cba13 = 0; float cba20 = 0, cba21 = 0, cba22 = 0, cba23 = 0; float cba30 = 0, cba31 = 0, cba32 = 0, cba33 = 0; float cab00 = 0, cab01 = 0, cab02 = 0, cab03 = 0; float cab10 = 0, cab11 = 0, cab12 = 0, cab13 = 0; float cab20 = 0, cab21 = 0, cab22 = 0, cab23 = 0; float cab30 = 0, cab31 = 0, cab32 = 0, cab33 = 0; float caa00 = 0, caa01 = 0, caa02 = 0, caa03 = 0; float caa10 = 0, caa11 = 0, caa12 = 0, caa13 = 0; float caa20 = 0, caa21 = 0, caa22 = 0, caa23 = 0; float caa30 = 0, caa31 = 0, caa32 = 0, caa33 = 0; // float cbb00, cbb01, cbb02, cbb03; // float cbb10, cbb11, cbb12, cbb13; // float cbb20, cbb21, cbb22, cbb23; // float cbb30, cbb31, cbb32, cbb33; // float cba00, cba01, cba02, cba03; // float cba10, cba11, cba12, cba13; // float cba20, cba21, cba22, cba23; // float cba30, cba31, cba32, cba33; // float cab00, cab01, cab02, cab03; // float cab10, cab11, cab12, cab13; // float cab20, cab21, cab22, cab23; // float cab30, cab31, cab32, cab33; // float caa00, caa01, caa02, caa03; // float caa10, caa11, caa12, caa13; // float caa20, caa21, caa22, caa23; // float caa30, caa31, caa32, caa33; float j0Ab00, j0Ab01, j0Ab02, j0Ab03; float j0Bb00, j0Bb01, j0Bb02, j0Bb03; float j0Aa00, j0Aa01, j0Aa02, j0Aa03; float j0Ba00, j0Ba01, j0Ba02, j0Ba03; // float j1Ab00, j1Ab01, j1Ab02, j1Ab03; // float j1Bb00, j1Bb01, j1Bb02, j1Bb03; // float j1Aa00, j1Aa01, j1Aa02, j1Aa03; // float j1Ba00, j1Ba01, j1Ba02, j1Ba03; // float j0Ab00=1, j0Ab01=1, j0Ab02=1, j0Ab03=1; // float j0Bb00=1, j0Bb01=1, j0Bb02=1, j0Bb03=1; // float j0Aa00=1, j0Aa01=1, j0Aa02=1, j0Aa03=1; // float j0Ba00=1, j0Ba01=1, j0Ba02=1, j0Ba03=1; // float j1Ab00=1, j1Ab01=1, j1Ab02=1, j1Ab03=1; // float j1Bb00=1, j1Bb01=1, 
j1Bb02=1, j1Bb03=1; // float j1Aa00=1, j1Aa01=1, j1Aa02=1, j1Aa03=1; // float j1Ba00=1, j1Ba01=1, j1Ba02=1, j1Ba03=1; while (track0 <= end) { #if USE_TEXTURE a_b_shm[write_offset + 0 * 64 + 0] = tex1Dfetch(tex, track0 + 0); a_b_shm[write_offset + 0 * 64 + 1] = tex1Dfetch(tex, track0 + 1); a_b_shm[write_offset + 0 * 64 + 2] = tex1Dfetch(tex, track0 + 2); a_b_shm[write_offset + 0 * 64 + 3] = tex1Dfetch(tex, track0 + 3); a_b_shm[write_offset + 2 * 64 + 0] = tex1Dfetch(tex, track2 + 0); a_b_shm[write_offset + 2 * 64 + 1] = tex1Dfetch(tex, track2 + 1); a_b_shm[write_offset + 2 * 64 + 2] = tex1Dfetch(tex, track2 + 2); a_b_shm[write_offset + 2 * 64 + 3] = tex1Dfetch(tex, track2 + 3); a_b_shm[write_offset + 4 * 64 + 0] = tex1Dfetch(tex, track4 + 0); a_b_shm[write_offset + 4 * 64 + 1] = tex1Dfetch(tex, track4 + 1); a_b_shm[write_offset + 4 * 64 + 2] = tex1Dfetch(tex, track4 + 2); a_b_shm[write_offset + 4 * 64 + 3] = tex1Dfetch(tex, track4 + 3); a_b_shm[write_offset + 6 * 64 + 0] = tex1Dfetch(tex, track6 + 0); a_b_shm[write_offset + 6 * 64 + 1] = tex1Dfetch(tex, track6 + 1); a_b_shm[write_offset + 6 * 64 + 2] = tex1Dfetch(tex, track6 + 2); a_b_shm[write_offset + 6 * 64 + 3] = tex1Dfetch(tex, track6 + 3); #else a_b_shm[write_offset + 0 * 64 + 0] = read_addr[track0 + 0]; a_b_shm[write_offset + 0 * 64 + 1] = read_addr[track0 + 1]; a_b_shm[write_offset + 0 * 64 + 2] = read_addr[track0 + 2]; a_b_shm[write_offset + 0 * 64 + 3] = read_addr[track0 + 3]; a_b_shm[write_offset + 2 * 64 + 0] = read_addr[track2 + 0]; a_b_shm[write_offset + 2 * 64 + 1] = read_addr[track2 + 1]; a_b_shm[write_offset + 2 * 64 + 2] = read_addr[track2 + 2]; a_b_shm[write_offset + 2 * 64 + 3] = read_addr[track2 + 3]; a_b_shm[write_offset + 4 * 64 + 0] = read_addr[track4 + 0]; a_b_shm[write_offset + 4 * 64 + 1] = read_addr[track4 + 1]; a_b_shm[write_offset + 4 * 64 + 2] = read_addr[track4 + 2]; a_b_shm[write_offset + 4 * 64 + 3] = read_addr[track4 + 3]; a_b_shm[write_offset + 6 * 64 + 0] = read_addr[track6 + 0]; 
a_b_shm[write_offset + 6 * 64 + 1] = read_addr[track6 + 1]; a_b_shm[write_offset + 6 * 64 + 2] = read_addr[track6 + 2]; a_b_shm[write_offset + 6 * 64 + 3] = read_addr[track6 + 3]; #endif __syncthreads(); // __syncwarp(0xFFFFFFFF); write_offset ^= 16 * 64; track0 += 8 * ldx; track2 += 8 * ldx; track4 += 8 * ldx; track6 += 8 * ldx; for (int j = 0; j < 8; ++j) { // int prefetch = (j + 1) % 8; int prefetch = j; j0Ab00 = a_b_shm[readAs + prefetch * 64 + 0]; j0Ab01 = a_b_shm[readAs + prefetch * 64 + 1]; j0Ab02 = a_b_shm[readAs + prefetch * 64 + 2]; j0Ab03 = a_b_shm[readAs + prefetch * 64 + 3]; j0Bb00 = a_b_shm[readBs + prefetch * 64 + 0]; j0Bb01 = a_b_shm[readBs + prefetch * 64 + 1]; j0Bb02 = a_b_shm[readBs + prefetch * 64 + 2]; j0Bb03 = a_b_shm[readBs + prefetch * 64 + 3]; j0Aa00 = a_b_shm[readAs + prefetch * 64 + 32 + 0]; j0Aa01 = a_b_shm[readAs + prefetch * 64 + 32 + 1]; j0Aa02 = a_b_shm[readAs + prefetch * 64 + 32 + 2]; j0Aa03 = a_b_shm[readAs + prefetch * 64 + 32 + 3]; j0Ba00 = a_b_shm[readBs + prefetch * 64 + 32 + 0]; j0Ba01 = a_b_shm[readBs + prefetch * 64 + 32 + 1]; j0Ba02 = a_b_shm[readBs + prefetch * 64 + 32 + 2]; j0Ba03 = a_b_shm[readBs + prefetch * 64 + 32 + 3]; cbb00 += j0Bb00 * j0Ab00; cbb01 += j0Bb00 * j0Ab01; // j1Ab00 = a_b_shm[readAs + prefetch * 64 + 0]; // j1Ab01 = a_b_shm[readAs + prefetch * 64 + 1]; // j1Ab02 = a_b_shm[readAs + prefetch * 64 + 2]; // j1Ab03 = a_b_shm[readAs + prefetch * 64 + 3]; cbb02 += j0Bb00 * j0Ab02; cbb03 += j0Bb00 * j0Ab03; // j1Bb00 = a_b_shm[readBs + prefetch * 64 + 0]; // j1Bb01 = a_b_shm[readBs + prefetch * 64 + 1]; // j1Bb02 = a_b_shm[readBs + prefetch * 64 + 2]; // j1Bb03 = a_b_shm[readBs + prefetch * 64 + 3]; cbb10 += j0Bb01 * j0Ab00; cbb11 += j0Bb01 * j0Ab01; // j1Aa00 = a_b_shm[readAs + prefetch * 64 + 32 + 0]; // j1Aa01 = a_b_shm[readAs + prefetch * 64 + 32 + 1]; // j1Aa02 = a_b_shm[readAs + prefetch * 64 + 32 + 2]; // j1Aa03 = a_b_shm[readAs + prefetch * 64 + 32 + 3]; cbb12 += j0Bb01 * j0Ab02; cbb13 += j0Bb01 * 
j0Ab03; // j1Ba00 = a_b_shm[readBs + prefetch * 64 + 32 + 0]; // j1Ba01 = a_b_shm[readBs + prefetch * 64 + 32 + 1]; // j1Ba02 = a_b_shm[readBs + prefetch * 64 + 32 + 2]; // j1Ba03 = a_b_shm[readBs + prefetch * 64 + 32 + 3]; cbb20 += j0Bb02 * j0Ab00; cbb21 += j0Bb02 * j0Ab01; cbb22 += j0Bb02 * j0Ab02; cbb23 += j0Bb02 * j0Ab03; cbb30 += j0Bb03 * j0Ab00; cbb31 += j0Bb03 * j0Ab01; cbb32 += j0Bb03 * j0Ab02; cbb33 += j0Bb03 * j0Ab03; cba00 += j0Ba00 * j0Ab00; cba01 += j0Ba00 * j0Ab01; cba02 += j0Ba00 * j0Ab02; cba03 += j0Ba00 * j0Ab03; cba10 += j0Ba01 * j0Ab00; cba11 += j0Ba01 * j0Ab01; cba12 += j0Ba01 * j0Ab02; cba13 += j0Ba01 * j0Ab03; cba20 += j0Ba02 * j0Ab00; cba21 += j0Ba02 * j0Ab01; cba22 += j0Ba02 * j0Ab02; cba23 += j0Ba02 * j0Ab03; cba30 += j0Ba03 * j0Ab00; cba31 += j0Ba03 * j0Ab01; cba32 += j0Ba03 * j0Ab02; cba33 += j0Ba03 * j0Ab03; cab00 += j0Bb00 * j0Aa00; cab01 += j0Bb00 * j0Aa01; cab02 += j0Bb00 * j0Aa02; cab03 += j0Bb00 * j0Aa03; cab10 += j0Bb01 * j0Aa00; cab11 += j0Bb01 * j0Aa01; cab12 += j0Bb01 * j0Aa02; cab13 += j0Bb01 * j0Aa03; cab20 += j0Bb02 * j0Aa00; cab21 += j0Bb02 * j0Aa01; cab22 += j0Bb02 * j0Aa02; cab23 += j0Bb02 * j0Aa03; cab30 += j0Bb03 * j0Aa00; cab31 += j0Bb03 * j0Aa01; cab32 += j0Bb03 * j0Aa02; cab33 += j0Bb03 * j0Aa03; caa00 += j0Ba00 * j0Aa00; caa01 += j0Ba00 * j0Aa01; caa02 += j0Ba00 * j0Aa02; caa03 += j0Ba00 * j0Aa03; caa10 += j0Ba01 * j0Aa00; caa11 += j0Ba01 * j0Aa01; caa12 += j0Ba01 * j0Aa02; caa13 += j0Ba01 * j0Aa03; caa20 += j0Ba02 * j0Aa00; caa21 += j0Ba02 * j0Aa01; caa22 += j0Ba02 * j0Aa02; caa23 += j0Ba02 * j0Aa03; caa30 += j0Ba03 * j0Aa00; caa31 += j0Ba03 * j0Aa01; caa32 += j0Ba03 * j0Aa02; caa33 += j0Ba03 * j0Aa03; } readAs ^= 16 * 64; readBs ^= 16 * 64; } __syncthreads(); int tid31 = tid & 31; int tid32 = tid & 32; int coord_x = readBs & 0x7f; int coord_y = readAs & 0x7f; int writeCs = coord_x / 4 * 64 + coord_y; int readCs = (tid32 << 3) + tid31; int ldc4 = M * 4; int Cy00 = block_x * 64 * M + block_y * 64 + (tid32 >> 1) * M + 
tid31; int Cy04 = Cy00 + ldc4; int Cy08 = Cy00 + 2 * ldc4; int Cy12 = Cy00 + 3 * ldc4; a_b_shm[writeCs + 0] = cbb00; a_b_shm[writeCs + 1] = cbb01; a_b_shm[writeCs + 2] = cbb02; a_b_shm[writeCs + 3] = cbb03; a_b_shm[writeCs + 32 + 0] = cab00; a_b_shm[writeCs + 32 + 1] = cab01; a_b_shm[writeCs + 32 + 2] = cab02; a_b_shm[writeCs + 32 + 3] = cab03; // if (threadIdx.x == 1) { // printf("reg r0, c4: %f\n", cbb00); // } // if (threadIdx.x == 18) { // printf("reg r7, c8: %f\n", cbb03); // printf("reg r39, c8: %f\n", cab03); // } cbb00 = a_b_shm[readCs + 0 * 64 + 0]; cbb01 = a_b_shm[readCs + 0 * 64 + 32]; cbb02 = a_b_shm[readCs + 1 * 64 + 0]; cbb03 = a_b_shm[readCs + 1 * 64 + 32]; cab00 = a_b_shm[readCs + 2 * 64 + 0]; cab01 = a_b_shm[readCs + 2 * 64 + 32]; cab02 = a_b_shm[readCs + 3 * 64 + 0]; cab03 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb00; c[Cy00 + 32] = cbb01; c[Cy04 + 0] = cbb02; c[Cy04 + 32] = cbb03; c[Cy08 + 0] = cab00; c[Cy08 + 32] = cab01; c[Cy12 + 0] = cab02; c[Cy12 + 32] = cab03; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb10; a_b_shm[writeCs + 1] = cbb11; a_b_shm[writeCs + 2] = cbb12; a_b_shm[writeCs + 3] = cbb13; a_b_shm[writeCs + 32 + 0] = cab10; a_b_shm[writeCs + 32 + 1] = cab11; a_b_shm[writeCs + 32 + 2] = cab12; a_b_shm[writeCs + 32 + 3] = cab13; cbb10 = a_b_shm[readCs + 0 * 64 + 0]; cbb11 = a_b_shm[readCs + 0 * 64 + 32]; cbb12 = a_b_shm[readCs + 1 * 64 + 0]; cbb13 = a_b_shm[readCs + 1 * 64 + 32]; cab10 = a_b_shm[readCs + 2 * 64 + 0]; cab11 = a_b_shm[readCs + 2 * 64 + 32]; cab12 = a_b_shm[readCs + 3 * 64 + 0]; cab13 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb10; c[Cy00 + 32] = cbb11; c[Cy04 + 0] = cbb12; c[Cy04 + 32] = cbb13; c[Cy08 + 0] = cab10; c[Cy08 + 32] = cab11; c[Cy12 + 0] = cab12; c[Cy12 + 32] = cab13; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb20; a_b_shm[writeCs + 1] = cbb21; a_b_shm[writeCs + 2] = cbb22; a_b_shm[writeCs + 3] = cbb23; a_b_shm[writeCs + 32 + 0] = cab20; 
a_b_shm[writeCs + 32 + 1] = cab21; a_b_shm[writeCs + 32 + 2] = cab22; a_b_shm[writeCs + 32 + 3] = cab23; cbb20 = a_b_shm[readCs + 0 * 64 + 0]; cbb21 = a_b_shm[readCs + 0 * 64 + 32]; cbb22 = a_b_shm[readCs + 1 * 64 + 0]; cbb23 = a_b_shm[readCs + 1 * 64 + 32]; cab20 = a_b_shm[readCs + 2 * 64 + 0]; cab21 = a_b_shm[readCs + 2 * 64 + 32]; cab22 = a_b_shm[readCs + 3 * 64 + 0]; cab23 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb20; c[Cy00 + 32] = cbb21; c[Cy04 + 0] = cbb22; c[Cy04 + 32] = cbb23; c[Cy08 + 0] = cab20; c[Cy08 + 32] = cab21; c[Cy12 + 0] = cab22; c[Cy12 + 32] = cab23; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb30; a_b_shm[writeCs + 1] = cbb31; a_b_shm[writeCs + 2] = cbb32; a_b_shm[writeCs + 3] = cbb33; a_b_shm[writeCs + 32 + 0] = cab30; a_b_shm[writeCs + 32 + 1] = cab31; a_b_shm[writeCs + 32 + 2] = cab32; a_b_shm[writeCs + 32 + 3] = cab33; cbb30 = a_b_shm[readCs + 0 * 64 + 0]; cbb31 = a_b_shm[readCs + 0 * 64 + 32]; cbb32 = a_b_shm[readCs + 1 * 64 + 0]; cbb33 = a_b_shm[readCs + 1 * 64 + 32]; cab30 = a_b_shm[readCs + 2 * 64 + 0]; cab31 = a_b_shm[readCs + 2 * 64 + 32]; cab32 = a_b_shm[readCs + 3 * 64 + 0]; cab33 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb30; c[Cy00 + 32] = cbb31; c[Cy04 + 0] = cbb32; c[Cy04 + 32] = cbb33; c[Cy08 + 0] = cab30; c[Cy08 + 32] = cab31; c[Cy12 + 0] = cab32; c[Cy12 + 32] = cab33; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; Cy00 += 28 * M; Cy04 += 28 * M; Cy08 += 28 * M; Cy12 += 28 * M; a_b_shm[writeCs + 0] = cba00; a_b_shm[writeCs + 1] = cba01; a_b_shm[writeCs + 2] = cba02; a_b_shm[writeCs + 3] = cba03; a_b_shm[writeCs + 32 + 0] = caa00; a_b_shm[writeCs + 32 + 1] = caa01; a_b_shm[writeCs + 32 + 2] = caa02; a_b_shm[writeCs + 32 + 3] = caa03; cba00 = a_b_shm[readCs + 0 * 64 + 0]; cba01 = a_b_shm[readCs + 0 * 64 + 32]; cba02 = a_b_shm[readCs + 1 * 64 + 0]; cba03 = a_b_shm[readCs + 1 * 64 + 32]; caa00 = a_b_shm[readCs + 2 * 64 + 0]; caa01 = a_b_shm[readCs + 2 * 64 + 32]; caa02 = a_b_shm[readCs + 3 * 64 
+ 0]; caa03 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba00; c[Cy00 + 32] = cba01; c[Cy04 + 0] = cba02; c[Cy04 + 32] = cba03; c[Cy08 + 0] = caa00; c[Cy08 + 32] = caa01; c[Cy12 + 0] = caa02; c[Cy12 + 32] = caa03; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba10; a_b_shm[writeCs + 1] = cba11; a_b_shm[writeCs + 2] = cba12; a_b_shm[writeCs + 3] = cba13; a_b_shm[writeCs + 32 + 0] = caa10; a_b_shm[writeCs + 32 + 1] = caa11; a_b_shm[writeCs + 32 + 2] = caa12; a_b_shm[writeCs + 32 + 3] = caa13; cba10 = a_b_shm[readCs + 0 * 64 + 0]; cba11 = a_b_shm[readCs + 0 * 64 + 32]; cba12 = a_b_shm[readCs + 1 * 64 + 0]; cba13 = a_b_shm[readCs + 1 * 64 + 32]; caa10 = a_b_shm[readCs + 2 * 64 + 0]; caa11 = a_b_shm[readCs + 2 * 64 + 32]; caa12 = a_b_shm[readCs + 3 * 64 + 0]; caa13 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba10; c[Cy00 + 32] = cba11; c[Cy04 + 0] = cba12; c[Cy04 + 32] = cba13; c[Cy08 + 0] = caa10; c[Cy08 + 32] = caa11; c[Cy12 + 0] = caa12; c[Cy12 + 32] = caa13; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba20; a_b_shm[writeCs + 1] = cba21; a_b_shm[writeCs + 2] = cba22; a_b_shm[writeCs + 3] = cba23; a_b_shm[writeCs + 32 + 0] = caa20; a_b_shm[writeCs + 32 + 1] = caa21; a_b_shm[writeCs + 32 + 2] = caa22; a_b_shm[writeCs + 32 + 3] = caa23; cba20 = a_b_shm[readCs + 0 * 64 + 0]; cba21 = a_b_shm[readCs + 0 * 64 + 32]; cba22 = a_b_shm[readCs + 1 * 64 + 0]; cba23 = a_b_shm[readCs + 1 * 64 + 32]; caa20 = a_b_shm[readCs + 2 * 64 + 0]; caa21 = a_b_shm[readCs + 2 * 64 + 32]; caa22 = a_b_shm[readCs + 3 * 64 + 0]; caa23 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba20; c[Cy00 + 32] = cba21; c[Cy04 + 0] = cba22; c[Cy04 + 32] = cba23; c[Cy08 + 0] = caa20; c[Cy08 + 32] = caa21; c[Cy12 + 0] = caa22; c[Cy12 + 32] = caa23; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba30; a_b_shm[writeCs + 1] = cba31; a_b_shm[writeCs + 2] = cba32; a_b_shm[writeCs + 3] = cba33; a_b_shm[writeCs + 32 + 0] = caa30; a_b_shm[writeCs 
+ 32 + 1] = caa31; a_b_shm[writeCs + 32 + 2] = caa32; a_b_shm[writeCs + 32 + 3] = caa33; cba30 = a_b_shm[readCs + 0 * 64 + 0]; cba31 = a_b_shm[readCs + 0 * 64 + 32]; cba32 = a_b_shm[readCs + 1 * 64 + 0]; cba33 = a_b_shm[readCs + 1 * 64 + 32]; caa30 = a_b_shm[readCs + 2 * 64 + 0]; caa31 = a_b_shm[readCs + 2 * 64 + 32]; caa32 = a_b_shm[readCs + 3 * 64 + 0]; caa33 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba30; c[Cy00 + 32] = cba31; c[Cy04 + 0] = cba32; c[Cy04 + 32] = cba33; c[Cy08 + 0] = caa30; c[Cy08 + 32] = caa31; c[Cy12 + 0] = caa32; c[Cy12 + 32] = caa33; } __global__ void cuda_kernel_sgemm_100_tex( float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta) { sgemm_block_64x64_tex(a, b, c, M, N, K, alpha, beta); }
53575a489dc59d1e43bb7222eb1f72b488e3d381.cu
#include "matmul_kernel.h" #include "openblas/cblas.h" #include "prof.h" #include <assert.h> #include <cublas_v2.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <iostream> #include <math.h> #include <omp.h> #include <stdio.h> using namespace std; #define WMMA_M 16 #define WMMA_N 16 #define WMMA_K 16 __global__ void cuda_kernel_sgemm_100_tex( float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta); // typedef texture<float, cudaTextureType1D, cudaReadModeElementType> floatTex; // texture<float, cudaTextureType1D, cudaReadModeElementType> tex1DRefA(0, cudaFilterModePoint, cudaAddressModeBorder); // texture<float, cudaTextureType1D, cudaReadModeElementType> tex1DRefB(0, cudaFilterModePoint, cudaAddressModeBorder); #define USE_TEXTURE 0 void gpu_sgemm(float* a, float* b, float* c, size_t N, size_t M, size_t K, float alpha, float beta, int kernel_type, bool half_input, bool trans_a) { float* dev_a = NULL; float* dev_at = NULL; float* dev_b = NULL; float* dev_c = NULL; half* A = NULL; half* B = NULL; float* C = NULL; float* D = NULL; float* at = (float*)malloc(M * K * sizeof(float)); half* ch = (half*)malloc(M * N * sizeof(half)); float flop = 2 * (float)M * (float)N * (float)K; cublasHandle_t handle; cublasCreate(&handle); int lda = K; int ldb = N; int ldc = N; // int lda = M; // int ldb = K; // int ldc = M; if (half_input) { checkCudaErrors(cudaMalloc((void**)&A, sizeof(half) * M * K)); checkCudaErrors(cudaMalloc((void**)&B, sizeof(half) * N * K)); checkCudaErrors(cudaMalloc((void**)&C, sizeof(float) * M * N)); checkCudaErrors(cudaMalloc((void**)&D, sizeof(float) * M * N)); checkCudaErrors(cudaMalloc((void**)&dev_c, M * N * sizeof(float))); assert(((unsigned long long)A) % 128 == 0); assert(((unsigned long long)B) % 128 == 0); assert(((unsigned long long)C) % 128 == 0); assert(((unsigned long long)D) % 128 == 0); checkCudaErrors(cudaMemcpy(A, a, sizeof(half) * M * K, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(B, b, 
sizeof(half) * N * K, cudaMemcpyHostToDevice)); for (int i = 0; i < M * N; ++i) { ch[i] = __float2half(c[i]); } checkCudaErrors(cudaMemcpy(C, ch, sizeof(half) * M * N, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(D, 0, sizeof(float) * M * N)); cudaMemcpy(dev_c, c, M * N * sizeof(float), cudaMemcpyHostToDevice); } else { cudaMalloc((void**)&dev_a, M * K * sizeof(float)); cudaMalloc((void**)&dev_b, K * N * sizeof(float)); cudaMalloc((void**)&dev_c, M * N * sizeof(float)); cudaMemcpy(dev_a, a, M * K * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, K * N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_c, c, M * N * sizeof(float), cudaMemcpyHostToDevice); if (trans_a) { for (int i = 0; i < M; ++i) { for (int j = 0; j < K; ++j) { at[j * M + i] = a[i * K + j]; } } cudaMalloc((void**)&dev_at, M * K * sizeof(float)); cudaMemcpy(dev_at, at, M * K * sizeof(float), cudaMemcpyHostToDevice); } } // cudaBindTexture(0, tex1DRefA, dev_at, M * K * sizeof(float)); // cudaBindTexture(0, tex1DRefB, dev_b, K * N * sizeof(float)); int cycle_count = 1000; cudaError_t result; cudaEvent_t start; cudaEvent_t stop; float msecTotal; cudaEventCreate(&start); cudaEventRecord(start, NULL); switch (kernel_type) { case 0: { int grid_r = M / 32; int grid_c = N / 32; if (M % 32 != 0) grid_r += 1; if (N % 32 != 0) grid_c += 1; dim3 grid_d(grid_r, grid_c, 1); dim3 block_d(32, 32, 1); cuda_kernel_sgemm_0<<<grid_d, block_d>>>(dev_a, dev_b, dev_c, N, M, K, alpha, beta); break; } case 1: { int grid_r = M / 32; int grid_c = N / 32; if (M % 32 != 0) grid_r += 1; if (N % 32 != 0) grid_c += 1; dim3 grid_d(grid_r, grid_c, 1); dim3 block_d(32, 32, 1); cuda_kernel_sgemm_1<<<grid_d, block_d>>>(dev_a, dev_b, dev_c, N, M, K, alpha, beta); break; } case 2: { int grid_r = M / 32; int grid_c = N / 32; if (M % 32 != 0) grid_r += 1; if (N % 32 != 0) grid_c += 1; dim3 grid_d(grid_r, grid_c, 1); dim3 block_d(32, 32, 1); for (int n = 0; n < cycle_count; ++n) { cuda_kernel_sgemm_2<<<grid_d, 
block_d>>>(dev_a, dev_b, dev_c, N, M, K, alpha, beta); } break; } case 20: { int grid_r = M / 64; int grid_c = N / 64; if (M % 64 != 0) grid_r += 1; if (N % 64 != 0) grid_c += 1; dim3 grid_d(grid_r, grid_c, 1); dim3 block_d(32, 32, 1); for (int n = 0; n < cycle_count; ++n) { cuda_kernel_sgemm_2_64x64<<<grid_d, block_d>>>(dev_a, dev_b, dev_c, N, M, K, alpha, beta); } break; } case 'b': { assert(!half_input); for (int n = 0; n < cycle_count; ++n) { cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, dev_b, N, dev_a, K, &beta, dev_c, N); } break; } case 21: { // cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT; cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; if (half_input) { for (int n = 0; n < cycle_count; ++n) { cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, CUDA_R_16F, N, A, CUDA_R_16F, K, &beta, dev_c, CUDA_R_32F, N, CUDA_R_32F, algo); } } else { for (int n = 0; n < cycle_count; ++n) { cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, dev_b, CUDA_R_32F, N, dev_a, CUDA_R_32F, K, &beta, dev_c, CUDA_R_32F, N, CUDA_R_32F, algo); } } break; } case 'c': { if (half_input) { for (int n = 0; n < cycle_count; ++n) { result = CutlassSgemmNN(M, N, K, alpha, (float*)A, lda, (float*)B, ldb, beta, dev_c, ldc, 1); } } else { for (int n = 0; n < cycle_count; ++n) { result = CutlassSgemmNN(M, N, K, alpha, dev_a, lda, dev_b, ldb, beta, dev_c, ldc, 1); } } // if (result == cudaSuccess) { // cout << "CutlassSgemmNN success" << endl; // } break; } case 'r': { cudaError_t result; result = ReferenceGemm(M, N, K, alpha, dev_a, lda, dev_b, ldb, beta, dev_c, ldc); // if (result == cudaSuccess) { // cout << "ReferenceGemm success" << endl; // } break; } case 't': { dim3 gridDim; dim3 blockDim; // blockDim.x must be a multple of warpSize // 128x4 means we have 16 warps and a block computes a 64x64 output tile blockDim.x = 128; blockDim.y = 4; gridDim.x = (M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32); gridDim.y = (N + WMMA_N * 
blockDim.y - 1) / (WMMA_N * blockDim.y); assert(half_input); wmma_sgemm_kernel<<<gridDim, blockDim>>>(A, B, C, D, M, N, K, alpha, beta); } case 100: { int stride_x = 64; int stride_y = 64; int grid_x = (N + stride_x - 1) / stride_x; int grid_y = (M + stride_y - 1) / stride_y; int block_x = stride_x; dim3 grid_d(grid_x, grid_y, 1); dim3 block_d(block_x, 1, 1); // std::cout << grid_x << " " << grid_y << " " << block_x << std::endl; assert(trans_a); for (int n = 0; n < cycle_count; ++n) { cuda_kernel_sgemm_100<<<grid_d, block_d>>>(dev_at, dev_b, dev_c, M, N, K, alpha, beta); // cuda_kernel_sgemm_100_tex<<<grid_d, block_d>>>(dev_at, dev_b, dev_c, M, N, K, alpha, beta); // cuda_kernel_sgemm_100_v2<<<grid_d, block_d>>>(dev_at, dev_b, dev_c, M, N, K, alpha, beta); } break; } case 101: { dim3 grid((N + 255) / 256, (M + 127) / 128); for (int n = 0; n < cycle_count; ++n) { ampere_sgemm_128x256x8_kernel<<<grid, 256>>>(dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1); } break; } case 102: { dim3 grid((N + 255) / 256, (M + 127) / 128); for (int n = 0; n < cycle_count; ++n) { ampere_sgemm_my_opt_128x256x8_kernel_no_pingpong<<<grid, 256>>>( dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1); } break; } case 103: { dim3 grid((N + 255) / 256, (M + 127) / 128); for (int n = 0; n < cycle_count; ++n) { ampere_sgemm_my_opt_128x256x8_kernel_sm_pingpong<<<grid, 256>>>( dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1); } break; } case 104: { dim3 grid((N + 255) / 256, (M + 127) / 128); for (int n = 0; n < cycle_count; ++n) { ampere_sgemm_my_opt_128x256x8_kernel_sm_reg_pingpong<<<grid, 256>>>( dev_a, dev_b, dev_c, M, N, K, N * sizeof(float) * 8, 1); } break; } } cudaEventCreate(&stop); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); cudaEventElapsedTime(&msecTotal, start, stop); // cudaDeviceSynchronize(); float GFLOPs = cycle_count * flop / msecTotal / 1e+6; float compute_peak_ratio = 0.0; if (half_input) { compute_peak_ratio = GFLOPs / 312000.0f * 100; } else 
{ compute_peak_ratio = GFLOPs / 19500.0f * 100; } printf("Processing time: %f (ms), GFLOPS: %.6f, compute_peak_ratio: %.2f\%\n", msecTotal / cycle_count, GFLOPs, compute_peak_ratio); float* ct = (float*)malloc(M * N * sizeof(float)); if (kernel_type == 100) { cudaMemcpy(ct, dev_c, M * N * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { c[j * N + i] = ct[i * M + j]; } } } else { cudaMemcpy(c, dev_c, M * N * sizeof(float), cudaMemcpyDeviceToHost); } cublasDestroy(handle); if (half_input) { checkCudaErrors(cudaFree((void*)A)); checkCudaErrors(cudaFree((void*)B)); checkCudaErrors(cudaFree((void*)C)); checkCudaErrors(cudaFree((void*)D)); } else { checkCudaErrors(cudaFree(dev_a)); checkCudaErrors(cudaFree(dev_b)); if (trans_a) { checkCudaErrors(cudaFree(dev_at)); } } checkCudaErrors(cudaFree(dev_c)); free(at); free(ch); free(ct); } void gpu_warmup() { float* dev_p = 0; hs_timer timer; timer.tic("gpu warmup"); cudaMalloc((void**)&dev_p, 16 * 32 * sizeof(float)); cuda_kernel_warmup<<<16, 32>>>(dev_p); cudaDeviceSynchronize(); cudaFree(dev_p); timer.toc("gpu warmup"); } // void cpu_kernel_sgemm_0(float *a, float *b, float *c, size_t N, size_t M, size_t K, float alpha, float beta) { // for (int m = 0; m < M; ++m) { // for (int n = 0; n < N; ++n) { // float acc = 0.0f; // for (int k = 0; k < K; ++k) { // acc += a[m * K + k] * b[k * N + n]; // } // c[m * N + n] = alpha * acc + beta * c[m * N + n]; // } // } // } void cpu_sgemm(float* a, float* b, float* c, size_t N, size_t M, size_t K, float alpha, float beta, int kernel_type) { hs_timer timer; timer.tic("cpu sgemm"); switch (kernel_type) { case 0: { cpu_kernel_sgemm_0(a, b, c, N, M, K, alpha, beta); break; } case 'm': { cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, alpha, a, K, b, N, beta, c, N); break; } } timer.toc("cpu sgemm"); } void cpu_warmup() { hs_timer timer; timer.tic("cpu warmup"); const size_t arr_size = 1024; float* p = new float[arr_size]; 
#pragma omp parallel for simd for (size_t i = 0; i < arr_size; i++) { float f = (float)i; p[i] = f * f * f; } delete p; timer.toc("cpu warmup"); } __device__ void sgemm_block_64x64_tex( float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta) { __shared__ float a_b_shm[2 * 16 * 64]; int block_x = blockIdx.x; int block_y = blockIdx.y; // float* a = pa + block_y * 64; // float* b = pb + block_x * 64; // float* c = pc + block_x * 64 * M + block_y * 64; // int tid = threadIdx.x & 0x3f; int tid = threadIdx.x; int ldx = tid >= 32 ? N : M; int tid2 = (tid >> 4) & 1; // int tid15 = tid & 0xf; int tid15 = tid & 15; int a_b_offset = tid >= 32 ? block_x * 64 : block_y * 64; int track0 = a_b_offset + tid2 * ldx + tid15 * 4; int track2 = track0 + 2 * ldx; int track4 = track0 + 4 * ldx; int track6 = track0 + 6 * ldx; int end = track0 + (K - 8) * ldx; int write_offset = tid2 * 64 + tid15 * 4; write_offset += tid >= 32 ? 512 : 0; int readAs = ((tid >> 1) & 7) << 2; int readBs = ((((tid & 0x30) >> 3) | (tid & 1)) << 2) + 512; #if USE_TEXTURE floatTex tex = tid >= 32 ? tex1DRefB : tex1DRefA; #else float* read_addr = tid >= 32 ? 
b : a; #endif float cbb00 = 0, cbb01 = 0, cbb02 = 0, cbb03 = 0; float cbb10 = 0, cbb11 = 0, cbb12 = 0, cbb13 = 0; float cbb20 = 0, cbb21 = 0, cbb22 = 0, cbb23 = 0; float cbb30 = 0, cbb31 = 0, cbb32 = 0, cbb33 = 0; float cba00 = 0, cba01 = 0, cba02 = 0, cba03 = 0; float cba10 = 0, cba11 = 0, cba12 = 0, cba13 = 0; float cba20 = 0, cba21 = 0, cba22 = 0, cba23 = 0; float cba30 = 0, cba31 = 0, cba32 = 0, cba33 = 0; float cab00 = 0, cab01 = 0, cab02 = 0, cab03 = 0; float cab10 = 0, cab11 = 0, cab12 = 0, cab13 = 0; float cab20 = 0, cab21 = 0, cab22 = 0, cab23 = 0; float cab30 = 0, cab31 = 0, cab32 = 0, cab33 = 0; float caa00 = 0, caa01 = 0, caa02 = 0, caa03 = 0; float caa10 = 0, caa11 = 0, caa12 = 0, caa13 = 0; float caa20 = 0, caa21 = 0, caa22 = 0, caa23 = 0; float caa30 = 0, caa31 = 0, caa32 = 0, caa33 = 0; // float cbb00, cbb01, cbb02, cbb03; // float cbb10, cbb11, cbb12, cbb13; // float cbb20, cbb21, cbb22, cbb23; // float cbb30, cbb31, cbb32, cbb33; // float cba00, cba01, cba02, cba03; // float cba10, cba11, cba12, cba13; // float cba20, cba21, cba22, cba23; // float cba30, cba31, cba32, cba33; // float cab00, cab01, cab02, cab03; // float cab10, cab11, cab12, cab13; // float cab20, cab21, cab22, cab23; // float cab30, cab31, cab32, cab33; // float caa00, caa01, caa02, caa03; // float caa10, caa11, caa12, caa13; // float caa20, caa21, caa22, caa23; // float caa30, caa31, caa32, caa33; float j0Ab00, j0Ab01, j0Ab02, j0Ab03; float j0Bb00, j0Bb01, j0Bb02, j0Bb03; float j0Aa00, j0Aa01, j0Aa02, j0Aa03; float j0Ba00, j0Ba01, j0Ba02, j0Ba03; // float j1Ab00, j1Ab01, j1Ab02, j1Ab03; // float j1Bb00, j1Bb01, j1Bb02, j1Bb03; // float j1Aa00, j1Aa01, j1Aa02, j1Aa03; // float j1Ba00, j1Ba01, j1Ba02, j1Ba03; // float j0Ab00=1, j0Ab01=1, j0Ab02=1, j0Ab03=1; // float j0Bb00=1, j0Bb01=1, j0Bb02=1, j0Bb03=1; // float j0Aa00=1, j0Aa01=1, j0Aa02=1, j0Aa03=1; // float j0Ba00=1, j0Ba01=1, j0Ba02=1, j0Ba03=1; // float j1Ab00=1, j1Ab01=1, j1Ab02=1, j1Ab03=1; // float j1Bb00=1, j1Bb01=1, 
j1Bb02=1, j1Bb03=1; // float j1Aa00=1, j1Aa01=1, j1Aa02=1, j1Aa03=1; // float j1Ba00=1, j1Ba01=1, j1Ba02=1, j1Ba03=1; while (track0 <= end) { #if USE_TEXTURE a_b_shm[write_offset + 0 * 64 + 0] = tex1Dfetch(tex, track0 + 0); a_b_shm[write_offset + 0 * 64 + 1] = tex1Dfetch(tex, track0 + 1); a_b_shm[write_offset + 0 * 64 + 2] = tex1Dfetch(tex, track0 + 2); a_b_shm[write_offset + 0 * 64 + 3] = tex1Dfetch(tex, track0 + 3); a_b_shm[write_offset + 2 * 64 + 0] = tex1Dfetch(tex, track2 + 0); a_b_shm[write_offset + 2 * 64 + 1] = tex1Dfetch(tex, track2 + 1); a_b_shm[write_offset + 2 * 64 + 2] = tex1Dfetch(tex, track2 + 2); a_b_shm[write_offset + 2 * 64 + 3] = tex1Dfetch(tex, track2 + 3); a_b_shm[write_offset + 4 * 64 + 0] = tex1Dfetch(tex, track4 + 0); a_b_shm[write_offset + 4 * 64 + 1] = tex1Dfetch(tex, track4 + 1); a_b_shm[write_offset + 4 * 64 + 2] = tex1Dfetch(tex, track4 + 2); a_b_shm[write_offset + 4 * 64 + 3] = tex1Dfetch(tex, track4 + 3); a_b_shm[write_offset + 6 * 64 + 0] = tex1Dfetch(tex, track6 + 0); a_b_shm[write_offset + 6 * 64 + 1] = tex1Dfetch(tex, track6 + 1); a_b_shm[write_offset + 6 * 64 + 2] = tex1Dfetch(tex, track6 + 2); a_b_shm[write_offset + 6 * 64 + 3] = tex1Dfetch(tex, track6 + 3); #else a_b_shm[write_offset + 0 * 64 + 0] = read_addr[track0 + 0]; a_b_shm[write_offset + 0 * 64 + 1] = read_addr[track0 + 1]; a_b_shm[write_offset + 0 * 64 + 2] = read_addr[track0 + 2]; a_b_shm[write_offset + 0 * 64 + 3] = read_addr[track0 + 3]; a_b_shm[write_offset + 2 * 64 + 0] = read_addr[track2 + 0]; a_b_shm[write_offset + 2 * 64 + 1] = read_addr[track2 + 1]; a_b_shm[write_offset + 2 * 64 + 2] = read_addr[track2 + 2]; a_b_shm[write_offset + 2 * 64 + 3] = read_addr[track2 + 3]; a_b_shm[write_offset + 4 * 64 + 0] = read_addr[track4 + 0]; a_b_shm[write_offset + 4 * 64 + 1] = read_addr[track4 + 1]; a_b_shm[write_offset + 4 * 64 + 2] = read_addr[track4 + 2]; a_b_shm[write_offset + 4 * 64 + 3] = read_addr[track4 + 3]; a_b_shm[write_offset + 6 * 64 + 0] = read_addr[track6 + 0]; 
a_b_shm[write_offset + 6 * 64 + 1] = read_addr[track6 + 1]; a_b_shm[write_offset + 6 * 64 + 2] = read_addr[track6 + 2]; a_b_shm[write_offset + 6 * 64 + 3] = read_addr[track6 + 3]; #endif __syncthreads(); // __syncwarp(0xFFFFFFFF); write_offset ^= 16 * 64; track0 += 8 * ldx; track2 += 8 * ldx; track4 += 8 * ldx; track6 += 8 * ldx; for (int j = 0; j < 8; ++j) { // int prefetch = (j + 1) % 8; int prefetch = j; j0Ab00 = a_b_shm[readAs + prefetch * 64 + 0]; j0Ab01 = a_b_shm[readAs + prefetch * 64 + 1]; j0Ab02 = a_b_shm[readAs + prefetch * 64 + 2]; j0Ab03 = a_b_shm[readAs + prefetch * 64 + 3]; j0Bb00 = a_b_shm[readBs + prefetch * 64 + 0]; j0Bb01 = a_b_shm[readBs + prefetch * 64 + 1]; j0Bb02 = a_b_shm[readBs + prefetch * 64 + 2]; j0Bb03 = a_b_shm[readBs + prefetch * 64 + 3]; j0Aa00 = a_b_shm[readAs + prefetch * 64 + 32 + 0]; j0Aa01 = a_b_shm[readAs + prefetch * 64 + 32 + 1]; j0Aa02 = a_b_shm[readAs + prefetch * 64 + 32 + 2]; j0Aa03 = a_b_shm[readAs + prefetch * 64 + 32 + 3]; j0Ba00 = a_b_shm[readBs + prefetch * 64 + 32 + 0]; j0Ba01 = a_b_shm[readBs + prefetch * 64 + 32 + 1]; j0Ba02 = a_b_shm[readBs + prefetch * 64 + 32 + 2]; j0Ba03 = a_b_shm[readBs + prefetch * 64 + 32 + 3]; cbb00 += j0Bb00 * j0Ab00; cbb01 += j0Bb00 * j0Ab01; // j1Ab00 = a_b_shm[readAs + prefetch * 64 + 0]; // j1Ab01 = a_b_shm[readAs + prefetch * 64 + 1]; // j1Ab02 = a_b_shm[readAs + prefetch * 64 + 2]; // j1Ab03 = a_b_shm[readAs + prefetch * 64 + 3]; cbb02 += j0Bb00 * j0Ab02; cbb03 += j0Bb00 * j0Ab03; // j1Bb00 = a_b_shm[readBs + prefetch * 64 + 0]; // j1Bb01 = a_b_shm[readBs + prefetch * 64 + 1]; // j1Bb02 = a_b_shm[readBs + prefetch * 64 + 2]; // j1Bb03 = a_b_shm[readBs + prefetch * 64 + 3]; cbb10 += j0Bb01 * j0Ab00; cbb11 += j0Bb01 * j0Ab01; // j1Aa00 = a_b_shm[readAs + prefetch * 64 + 32 + 0]; // j1Aa01 = a_b_shm[readAs + prefetch * 64 + 32 + 1]; // j1Aa02 = a_b_shm[readAs + prefetch * 64 + 32 + 2]; // j1Aa03 = a_b_shm[readAs + prefetch * 64 + 32 + 3]; cbb12 += j0Bb01 * j0Ab02; cbb13 += j0Bb01 * 
j0Ab03; // j1Ba00 = a_b_shm[readBs + prefetch * 64 + 32 + 0]; // j1Ba01 = a_b_shm[readBs + prefetch * 64 + 32 + 1]; // j1Ba02 = a_b_shm[readBs + prefetch * 64 + 32 + 2]; // j1Ba03 = a_b_shm[readBs + prefetch * 64 + 32 + 3]; cbb20 += j0Bb02 * j0Ab00; cbb21 += j0Bb02 * j0Ab01; cbb22 += j0Bb02 * j0Ab02; cbb23 += j0Bb02 * j0Ab03; cbb30 += j0Bb03 * j0Ab00; cbb31 += j0Bb03 * j0Ab01; cbb32 += j0Bb03 * j0Ab02; cbb33 += j0Bb03 * j0Ab03; cba00 += j0Ba00 * j0Ab00; cba01 += j0Ba00 * j0Ab01; cba02 += j0Ba00 * j0Ab02; cba03 += j0Ba00 * j0Ab03; cba10 += j0Ba01 * j0Ab00; cba11 += j0Ba01 * j0Ab01; cba12 += j0Ba01 * j0Ab02; cba13 += j0Ba01 * j0Ab03; cba20 += j0Ba02 * j0Ab00; cba21 += j0Ba02 * j0Ab01; cba22 += j0Ba02 * j0Ab02; cba23 += j0Ba02 * j0Ab03; cba30 += j0Ba03 * j0Ab00; cba31 += j0Ba03 * j0Ab01; cba32 += j0Ba03 * j0Ab02; cba33 += j0Ba03 * j0Ab03; cab00 += j0Bb00 * j0Aa00; cab01 += j0Bb00 * j0Aa01; cab02 += j0Bb00 * j0Aa02; cab03 += j0Bb00 * j0Aa03; cab10 += j0Bb01 * j0Aa00; cab11 += j0Bb01 * j0Aa01; cab12 += j0Bb01 * j0Aa02; cab13 += j0Bb01 * j0Aa03; cab20 += j0Bb02 * j0Aa00; cab21 += j0Bb02 * j0Aa01; cab22 += j0Bb02 * j0Aa02; cab23 += j0Bb02 * j0Aa03; cab30 += j0Bb03 * j0Aa00; cab31 += j0Bb03 * j0Aa01; cab32 += j0Bb03 * j0Aa02; cab33 += j0Bb03 * j0Aa03; caa00 += j0Ba00 * j0Aa00; caa01 += j0Ba00 * j0Aa01; caa02 += j0Ba00 * j0Aa02; caa03 += j0Ba00 * j0Aa03; caa10 += j0Ba01 * j0Aa00; caa11 += j0Ba01 * j0Aa01; caa12 += j0Ba01 * j0Aa02; caa13 += j0Ba01 * j0Aa03; caa20 += j0Ba02 * j0Aa00; caa21 += j0Ba02 * j0Aa01; caa22 += j0Ba02 * j0Aa02; caa23 += j0Ba02 * j0Aa03; caa30 += j0Ba03 * j0Aa00; caa31 += j0Ba03 * j0Aa01; caa32 += j0Ba03 * j0Aa02; caa33 += j0Ba03 * j0Aa03; } readAs ^= 16 * 64; readBs ^= 16 * 64; } __syncthreads(); int tid31 = tid & 31; int tid32 = tid & 32; int coord_x = readBs & 0x7f; int coord_y = readAs & 0x7f; int writeCs = coord_x / 4 * 64 + coord_y; int readCs = (tid32 << 3) + tid31; int ldc4 = M * 4; int Cy00 = block_x * 64 * M + block_y * 64 + (tid32 >> 1) * M + 
tid31; int Cy04 = Cy00 + ldc4; int Cy08 = Cy00 + 2 * ldc4; int Cy12 = Cy00 + 3 * ldc4; a_b_shm[writeCs + 0] = cbb00; a_b_shm[writeCs + 1] = cbb01; a_b_shm[writeCs + 2] = cbb02; a_b_shm[writeCs + 3] = cbb03; a_b_shm[writeCs + 32 + 0] = cab00; a_b_shm[writeCs + 32 + 1] = cab01; a_b_shm[writeCs + 32 + 2] = cab02; a_b_shm[writeCs + 32 + 3] = cab03; // if (threadIdx.x == 1) { // printf("reg r0, c4: %f\n", cbb00); // } // if (threadIdx.x == 18) { // printf("reg r7, c8: %f\n", cbb03); // printf("reg r39, c8: %f\n", cab03); // } cbb00 = a_b_shm[readCs + 0 * 64 + 0]; cbb01 = a_b_shm[readCs + 0 * 64 + 32]; cbb02 = a_b_shm[readCs + 1 * 64 + 0]; cbb03 = a_b_shm[readCs + 1 * 64 + 32]; cab00 = a_b_shm[readCs + 2 * 64 + 0]; cab01 = a_b_shm[readCs + 2 * 64 + 32]; cab02 = a_b_shm[readCs + 3 * 64 + 0]; cab03 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb00; c[Cy00 + 32] = cbb01; c[Cy04 + 0] = cbb02; c[Cy04 + 32] = cbb03; c[Cy08 + 0] = cab00; c[Cy08 + 32] = cab01; c[Cy12 + 0] = cab02; c[Cy12 + 32] = cab03; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb10; a_b_shm[writeCs + 1] = cbb11; a_b_shm[writeCs + 2] = cbb12; a_b_shm[writeCs + 3] = cbb13; a_b_shm[writeCs + 32 + 0] = cab10; a_b_shm[writeCs + 32 + 1] = cab11; a_b_shm[writeCs + 32 + 2] = cab12; a_b_shm[writeCs + 32 + 3] = cab13; cbb10 = a_b_shm[readCs + 0 * 64 + 0]; cbb11 = a_b_shm[readCs + 0 * 64 + 32]; cbb12 = a_b_shm[readCs + 1 * 64 + 0]; cbb13 = a_b_shm[readCs + 1 * 64 + 32]; cab10 = a_b_shm[readCs + 2 * 64 + 0]; cab11 = a_b_shm[readCs + 2 * 64 + 32]; cab12 = a_b_shm[readCs + 3 * 64 + 0]; cab13 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb10; c[Cy00 + 32] = cbb11; c[Cy04 + 0] = cbb12; c[Cy04 + 32] = cbb13; c[Cy08 + 0] = cab10; c[Cy08 + 32] = cab11; c[Cy12 + 0] = cab12; c[Cy12 + 32] = cab13; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb20; a_b_shm[writeCs + 1] = cbb21; a_b_shm[writeCs + 2] = cbb22; a_b_shm[writeCs + 3] = cbb23; a_b_shm[writeCs + 32 + 0] = cab20; 
a_b_shm[writeCs + 32 + 1] = cab21; a_b_shm[writeCs + 32 + 2] = cab22; a_b_shm[writeCs + 32 + 3] = cab23; cbb20 = a_b_shm[readCs + 0 * 64 + 0]; cbb21 = a_b_shm[readCs + 0 * 64 + 32]; cbb22 = a_b_shm[readCs + 1 * 64 + 0]; cbb23 = a_b_shm[readCs + 1 * 64 + 32]; cab20 = a_b_shm[readCs + 2 * 64 + 0]; cab21 = a_b_shm[readCs + 2 * 64 + 32]; cab22 = a_b_shm[readCs + 3 * 64 + 0]; cab23 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb20; c[Cy00 + 32] = cbb21; c[Cy04 + 0] = cbb22; c[Cy04 + 32] = cbb23; c[Cy08 + 0] = cab20; c[Cy08 + 32] = cab21; c[Cy12 + 0] = cab22; c[Cy12 + 32] = cab23; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cbb30; a_b_shm[writeCs + 1] = cbb31; a_b_shm[writeCs + 2] = cbb32; a_b_shm[writeCs + 3] = cbb33; a_b_shm[writeCs + 32 + 0] = cab30; a_b_shm[writeCs + 32 + 1] = cab31; a_b_shm[writeCs + 32 + 2] = cab32; a_b_shm[writeCs + 32 + 3] = cab33; cbb30 = a_b_shm[readCs + 0 * 64 + 0]; cbb31 = a_b_shm[readCs + 0 * 64 + 32]; cbb32 = a_b_shm[readCs + 1 * 64 + 0]; cbb33 = a_b_shm[readCs + 1 * 64 + 32]; cab30 = a_b_shm[readCs + 2 * 64 + 0]; cab31 = a_b_shm[readCs + 2 * 64 + 32]; cab32 = a_b_shm[readCs + 3 * 64 + 0]; cab33 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cbb30; c[Cy00 + 32] = cbb31; c[Cy04 + 0] = cbb32; c[Cy04 + 32] = cbb33; c[Cy08 + 0] = cab30; c[Cy08 + 32] = cab31; c[Cy12 + 0] = cab32; c[Cy12 + 32] = cab33; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; Cy00 += 28 * M; Cy04 += 28 * M; Cy08 += 28 * M; Cy12 += 28 * M; a_b_shm[writeCs + 0] = cba00; a_b_shm[writeCs + 1] = cba01; a_b_shm[writeCs + 2] = cba02; a_b_shm[writeCs + 3] = cba03; a_b_shm[writeCs + 32 + 0] = caa00; a_b_shm[writeCs + 32 + 1] = caa01; a_b_shm[writeCs + 32 + 2] = caa02; a_b_shm[writeCs + 32 + 3] = caa03; cba00 = a_b_shm[readCs + 0 * 64 + 0]; cba01 = a_b_shm[readCs + 0 * 64 + 32]; cba02 = a_b_shm[readCs + 1 * 64 + 0]; cba03 = a_b_shm[readCs + 1 * 64 + 32]; caa00 = a_b_shm[readCs + 2 * 64 + 0]; caa01 = a_b_shm[readCs + 2 * 64 + 32]; caa02 = a_b_shm[readCs + 3 * 64 
+ 0]; caa03 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba00; c[Cy00 + 32] = cba01; c[Cy04 + 0] = cba02; c[Cy04 + 32] = cba03; c[Cy08 + 0] = caa00; c[Cy08 + 32] = caa01; c[Cy12 + 0] = caa02; c[Cy12 + 32] = caa03; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba10; a_b_shm[writeCs + 1] = cba11; a_b_shm[writeCs + 2] = cba12; a_b_shm[writeCs + 3] = cba13; a_b_shm[writeCs + 32 + 0] = caa10; a_b_shm[writeCs + 32 + 1] = caa11; a_b_shm[writeCs + 32 + 2] = caa12; a_b_shm[writeCs + 32 + 3] = caa13; cba10 = a_b_shm[readCs + 0 * 64 + 0]; cba11 = a_b_shm[readCs + 0 * 64 + 32]; cba12 = a_b_shm[readCs + 1 * 64 + 0]; cba13 = a_b_shm[readCs + 1 * 64 + 32]; caa10 = a_b_shm[readCs + 2 * 64 + 0]; caa11 = a_b_shm[readCs + 2 * 64 + 32]; caa12 = a_b_shm[readCs + 3 * 64 + 0]; caa13 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba10; c[Cy00 + 32] = cba11; c[Cy04 + 0] = cba12; c[Cy04 + 32] = cba13; c[Cy08 + 0] = caa10; c[Cy08 + 32] = caa11; c[Cy12 + 0] = caa12; c[Cy12 + 32] = caa13; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba20; a_b_shm[writeCs + 1] = cba21; a_b_shm[writeCs + 2] = cba22; a_b_shm[writeCs + 3] = cba23; a_b_shm[writeCs + 32 + 0] = caa20; a_b_shm[writeCs + 32 + 1] = caa21; a_b_shm[writeCs + 32 + 2] = caa22; a_b_shm[writeCs + 32 + 3] = caa23; cba20 = a_b_shm[readCs + 0 * 64 + 0]; cba21 = a_b_shm[readCs + 0 * 64 + 32]; cba22 = a_b_shm[readCs + 1 * 64 + 0]; cba23 = a_b_shm[readCs + 1 * 64 + 32]; caa20 = a_b_shm[readCs + 2 * 64 + 0]; caa21 = a_b_shm[readCs + 2 * 64 + 32]; caa22 = a_b_shm[readCs + 3 * 64 + 0]; caa23 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba20; c[Cy00 + 32] = cba21; c[Cy04 + 0] = cba22; c[Cy04 + 32] = cba23; c[Cy08 + 0] = caa20; c[Cy08 + 32] = caa21; c[Cy12 + 0] = caa22; c[Cy12 + 32] = caa23; Cy00 += M; Cy04 += M; Cy08 += M; Cy12 += M; a_b_shm[writeCs + 0] = cba30; a_b_shm[writeCs + 1] = cba31; a_b_shm[writeCs + 2] = cba32; a_b_shm[writeCs + 3] = cba33; a_b_shm[writeCs + 32 + 0] = caa30; a_b_shm[writeCs 
+ 32 + 1] = caa31; a_b_shm[writeCs + 32 + 2] = caa32; a_b_shm[writeCs + 32 + 3] = caa33; cba30 = a_b_shm[readCs + 0 * 64 + 0]; cba31 = a_b_shm[readCs + 0 * 64 + 32]; cba32 = a_b_shm[readCs + 1 * 64 + 0]; cba33 = a_b_shm[readCs + 1 * 64 + 32]; caa30 = a_b_shm[readCs + 2 * 64 + 0]; caa31 = a_b_shm[readCs + 2 * 64 + 32]; caa32 = a_b_shm[readCs + 3 * 64 + 0]; caa33 = a_b_shm[readCs + 3 * 64 + 32]; c[Cy00 + 0] = cba30; c[Cy00 + 32] = cba31; c[Cy04 + 0] = cba32; c[Cy04 + 32] = cba33; c[Cy08 + 0] = caa30; c[Cy08 + 32] = caa31; c[Cy12 + 0] = caa32; c[Cy12 + 32] = caa33; } __global__ void cuda_kernel_sgemm_100_tex( float* a, float* b, float* c, size_t M, size_t N, size_t K, float alpha, float beta) { sgemm_block_64x64_tex(a, b, c, M, N, K, alpha, beta); }
d47bb7764377b332e025305a57dd7f74ef41d742.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Compute potential energy for a system of particles Miguel Aragon Calvo Apr/2010 "This software contains source code provided by NVIDIA Corporation." "Glue c code based on galaxy collision demo" History: - 10/05/2010 First working implementation - 01/06/2010 Add softening */ /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. 
*/ #include <unistd.h> #include <stdio.h> #include <math.h> #define BLOCKDIM 256 __constant__ float softeningSquared; // Macros to simplify shared memory addressing #define SX(i) sharedPos[i+blockDim.x*threadIdx.y] //================================== // Modified function to compute reduced potential G=1, M=1. //================================== __device__ float bodyBodyInteraction(float invSum, float4 pos_j, float4 pos_i) { //--- Distance vector float3 r; r.x = pos_i.x - pos_j.x; r.y = pos_i.y - pos_j.y; r.z = pos_i.z - pos_j.z; //--- Squared distance plus softening float distSqr = r.x*r.x + r.y*r.y + r.z*r.z; //--- Avoid itself if (distSqr != 0) { float invDist = (pos_i.w * pos_j.w) / sqrtf(distSqr + softeningSquared); invSum += invDist; } return invSum; } //================================== // This is the "tile_calculation" function from the GPUG3 article. //================================== __device__ float tile_potential(float4 myPos, float pot) { extern __shared__ float4 sharedPos[]; unsigned long i = 0; //--- Here we unroll the loop: LOOP_UNROLL = 4 for (unsigned int counter = 0; counter < blockDim.x; ) { pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); counter += 4; } return pot; } //================================== // WRAP is used to force each block to start working on a different // chunk (and wrap around back to the beginning of the array) so that // not all multiprocessors try to read the same memory locations at once. 
//================================== #define WRAP(x,m) (((x)<m)?(x):(x-m)) // Mod without divide, works on values from 0 up to 2m __device__ float computePotential(float4 bodyPos, float4* positions, int numBodies){ extern __shared__ float4 sharedPos[]; float pot = 0.0f; int p = blockDim.x; int q = blockDim.y; int n = numBodies; int numTiles = n / (p * q); for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++) { sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x]; __syncthreads(); //--- This is the "tile_calculation" function from the GPUG3 article. //pot = tile_potential(bodyPos, pot); __syncthreads(); } return pot; } __global__ void integrateBodies(float4* Pos, float* poten, int numBodies){ //--- Get the index of this thread ? int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 pos_i = Pos[index]; //--- Return potential float pot = computePotential(pos_i, Pos, numBodies); //--- Put potential in fourth field (mass) //Pos[index].w = pot; poten[index] = pot; } //============================================================ // //============================================================ //--- Memory buffers on the GPU float4 *pos1; float *pote; int old_buf; int np; int np_nopad; //============================================== //--- Interface routines... 
//============================================== extern "C" { #define BLOCKSIZE 256 void iniciar(){ printf("test\n"); } //================================== /* Allocate GPU memory and set initial positions */ //================================== void init_nbody(int _n, float *_pos, float *_mass){ int i; // Pad with zero mass particles if not a multiple of blocksize np= (_n/BLOCKSIZE)*BLOCKSIZE; if(np<_n){np = np + BLOCKSIZE;} // Allocate GPU arrays hipMalloc((void **) &pos1, sizeof(float4)*np); hipMalloc((void **) &pote, sizeof(float) *np); //Prepare initial conditions float *posbuf = (float *) malloc(4*sizeof(float)*np); float *potbuf = (float *) malloc( sizeof(float)*np); for(i=0; i<_n; i++){ posbuf[4*i+0] = _pos[3*i+0]; posbuf[4*i+1] = _pos[3*i+1]; posbuf[4*i+2] = _pos[3*i+2]; posbuf[4*i+3] = _mass[i]; potbuf[i] = 0.0f; } // Pad particles for(i=_n; i<np; i++){ posbuf[4*i+0] = 0.0; posbuf[4*i+1] = 0.0; posbuf[4*i+2] = 0.0; posbuf[4*i+3] = 0.0; potbuf[i] = 0.0; } //Copy to GPU old_buf = 1; hipMemcpy(pos1, posbuf, sizeof(float4)*np, hipMemcpyHostToDevice); hipMemcpy(pote, potbuf, sizeof(float )*np, hipMemcpyHostToDevice); np_nopad = _n; } hipEvent_t evt; int underway = 0; //================================== /* Do the actual potential */ //================================== void compute_potential(void) { /* Execute the kernel */ dim3 Dg(np/BLOCKSIZE); dim3 Db(BLOCKSIZE); size_t Ns = 4 * sizeof(float) * BLOCKSIZE; hipEventCreate(&evt); hipLaunchKernelGGL(( integrateBodies) , dim3(Dg), dim3(Db), Ns , 0, pos1, pote, np); hipEventRecord(evt, 0); underway = 1; } //================================== /* Check whether the calculation is done */ //================================== int nbody_finished() { if(hipEventQuery(evt) == hipErrorNotReady){ return 0; } else { hipEventDestroy(evt); underway = 0; return 1; } } //================================== /* Shut down and deallocate */ //================================== void dealloc_nbody(int dump){ if(underway==1) 
while(nbody_finished()==0); hipFree(pos1); hipFree(pote); } //================================== /* Set softening */ //================================== void set_softening(float eps) { float eps2 = eps*eps; hipMemcpyToSymbol("softeningSquared", &eps2, sizeof(float), 0, hipMemcpyHostToDevice); } //================================== //--- Retrieve positions //================================== void get_positions(float *buf){ if(underway==1) while(nbody_finished()==0); float *pos = (float *) malloc(4*sizeof(float)*np); hipMemcpy(pos, pos1, sizeof(float)*4*np, hipMemcpyDeviceToHost); int i; for(i=0;i<np_nopad;i++){ buf[4*i+0] = pos[4*i+0]; buf[4*i+1] = pos[4*i+1]; buf[4*i+2] = pos[4*i+2]; buf[4*i+3] = pos[4*i+3]; } free(pos); } //================================== //--- Retrieve potential //================================== void get_potential(float *buf){ //--- Wait until computation is finish if(underway==1) while(nbody_finished()==0); float *pot = (float *) malloc(sizeof(float)*np); hipMemcpy(pot, pote, sizeof(float)*np, hipMemcpyDeviceToHost); int i; for(i=0;i<np_nopad;i++) { buf[i] = pot[i]; } free(pot); } } //--- end extern "C"
d47bb7764377b332e025305a57dd7f74ef41d742.cu
/* Compute potential energy for a system of particles Miguel Aragon Calvo Apr/2010 "This software contains source code provided by NVIDIA Corporation." "Glue c code based on galaxy collision demo" History: - 10/05/2010 First working implementation - 01/06/2010 Add softening */ /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. 
*/ #include <unistd.h> #include <stdio.h> #include <math.h> #define BLOCKDIM 256 __constant__ float softeningSquared; // Macros to simplify shared memory addressing #define SX(i) sharedPos[i+blockDim.x*threadIdx.y] //================================== // Modified function to compute reduced potential G=1, M=1. //================================== __device__ float bodyBodyInteraction(float invSum, float4 pos_j, float4 pos_i) { //--- Distance vector float3 r; r.x = pos_i.x - pos_j.x; r.y = pos_i.y - pos_j.y; r.z = pos_i.z - pos_j.z; //--- Squared distance plus softening float distSqr = r.x*r.x + r.y*r.y + r.z*r.z; //--- Avoid itself if (distSqr != 0) { float invDist = (pos_i.w * pos_j.w) / sqrtf(distSqr + softeningSquared); invSum += invDist; } return invSum; } //================================== // This is the "tile_calculation" function from the GPUG3 article. //================================== __device__ float tile_potential(float4 myPos, float pot) { extern __shared__ float4 sharedPos[]; unsigned long i = 0; //--- Here we unroll the loop: LOOP_UNROLL = 4 for (unsigned int counter = 0; counter < blockDim.x; ) { pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); pot = bodyBodyInteraction(pot, SX(i++), myPos); counter += 4; } return pot; } //================================== // WRAP is used to force each block to start working on a different // chunk (and wrap around back to the beginning of the array) so that // not all multiprocessors try to read the same memory locations at once. 
//================================== #define WRAP(x,m) (((x)<m)?(x):(x-m)) // Mod without divide, works on values from 0 up to 2m __device__ float computePotential(float4 bodyPos, float4* positions, int numBodies){ extern __shared__ float4 sharedPos[]; float pot = 0.0f; int p = blockDim.x; int q = blockDim.y; int n = numBodies; int numTiles = n / (p * q); for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++) { sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x]; __syncthreads(); //--- This is the "tile_calculation" function from the GPUG3 article. //pot = tile_potential(bodyPos, pot); __syncthreads(); } return pot; } __global__ void integrateBodies(float4* Pos, float* poten, int numBodies){ //--- Get the index of this thread ? int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 pos_i = Pos[index]; //--- Return potential float pot = computePotential(pos_i, Pos, numBodies); //--- Put potential in fourth field (mass) //Pos[index].w = pot; poten[index] = pot; } //============================================================ // //============================================================ //--- Memory buffers on the GPU float4 *pos1; float *pote; int old_buf; int np; int np_nopad; //============================================== //--- Interface routines... 
//============================================== extern "C" { #define BLOCKSIZE 256 void iniciar(){ printf("test\n"); } //================================== /* Allocate GPU memory and set initial positions */ //================================== void init_nbody(int _n, float *_pos, float *_mass){ int i; // Pad with zero mass particles if not a multiple of blocksize np= (_n/BLOCKSIZE)*BLOCKSIZE; if(np<_n){np = np + BLOCKSIZE;} // Allocate GPU arrays cudaMalloc((void **) &pos1, sizeof(float4)*np); cudaMalloc((void **) &pote, sizeof(float) *np); //Prepare initial conditions float *posbuf = (float *) malloc(4*sizeof(float)*np); float *potbuf = (float *) malloc( sizeof(float)*np); for(i=0; i<_n; i++){ posbuf[4*i+0] = _pos[3*i+0]; posbuf[4*i+1] = _pos[3*i+1]; posbuf[4*i+2] = _pos[3*i+2]; posbuf[4*i+3] = _mass[i]; potbuf[i] = 0.0f; } // Pad particles for(i=_n; i<np; i++){ posbuf[4*i+0] = 0.0; posbuf[4*i+1] = 0.0; posbuf[4*i+2] = 0.0; posbuf[4*i+3] = 0.0; potbuf[i] = 0.0; } //Copy to GPU old_buf = 1; cudaMemcpy(pos1, posbuf, sizeof(float4)*np, cudaMemcpyHostToDevice); cudaMemcpy(pote, potbuf, sizeof(float )*np, cudaMemcpyHostToDevice); np_nopad = _n; } cudaEvent_t evt; int underway = 0; //================================== /* Do the actual potential */ //================================== void compute_potential(void) { /* Execute the kernel */ dim3 Dg(np/BLOCKSIZE); dim3 Db(BLOCKSIZE); size_t Ns = 4 * sizeof(float) * BLOCKSIZE; cudaEventCreate(&evt); integrateBodies <<< Dg, Db, Ns >>> (pos1, pote, np); cudaEventRecord(evt, 0); underway = 1; } //================================== /* Check whether the calculation is done */ //================================== int nbody_finished() { if(cudaEventQuery(evt) == cudaErrorNotReady){ return 0; } else { cudaEventDestroy(evt); underway = 0; return 1; } } //================================== /* Shut down and deallocate */ //================================== void dealloc_nbody(int dump){ if(underway==1) while(nbody_finished()==0); 
cudaFree(pos1); cudaFree(pote); } //================================== /* Set softening */ //================================== void set_softening(float eps) { float eps2 = eps*eps; cudaMemcpyToSymbol("softeningSquared", &eps2, sizeof(float), 0, cudaMemcpyHostToDevice); } //================================== //--- Retrieve positions //================================== void get_positions(float *buf){ if(underway==1) while(nbody_finished()==0); float *pos = (float *) malloc(4*sizeof(float)*np); cudaMemcpy(pos, pos1, sizeof(float)*4*np, cudaMemcpyDeviceToHost); int i; for(i=0;i<np_nopad;i++){ buf[4*i+0] = pos[4*i+0]; buf[4*i+1] = pos[4*i+1]; buf[4*i+2] = pos[4*i+2]; buf[4*i+3] = pos[4*i+3]; } free(pos); } //================================== //--- Retrieve potential //================================== void get_potential(float *buf){ //--- Wait until computation is finish if(underway==1) while(nbody_finished()==0); float *pot = (float *) malloc(sizeof(float)*np); cudaMemcpy(pot, pote, sizeof(float)*np, cudaMemcpyDeviceToHost); int i; for(i=0;i<np_nopad;i++) { buf[i] = pot[i]; } free(pot); } } //--- end extern "C"
35e70512fe55dbe75060c44c8fffdec216df2c51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "scene_builder.h" #include <thrust/scan.h> #include "raymath/linear.h" #include "iostream" #include "rayenv/gpu/scene.h" #include "rayenv/gpu/scene.cuh" #include "rayprimitives/gpu/texture.cuh" #include "rayprimitives/gpu/phong.cuh" #include "rayprimitives/gpu/hitable.cuh" #include "rayprimitives/gpu/trimesh.cuh" #include "rayprimitives/gpu/light.cuh" #include "rayprimitives/material.h" #include "gputils/alloc.h" #include "assets.h" namespace rtracer { struct MeshConfig { rprimitives::gpu::Trimesh** meshes; int* ends; rmath::Vec3<int>* indices; rprimitives::Material* mats; rprimitives::TextureCoords* coords; rmath::Vec3<float>* mesh_pos; rmath::Quat<float>* mesh_rot; int n_meshes; }; __global__ void build_meshes(MeshConfig* config) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < config->n_meshes; i += stride) { int begin = i == 0 ? 
0 : config->ends[i - 1]; int count = config->ends[i] - begin; rprimitives::gpu::TriInner* triangles = new rprimitives::gpu::TriInner[count]; for (int j = 0; j < count; j++) { rprimitives::gpu::TriInner inner = rprimitives::gpu::TriInner( config->indices[begin + j], config->mats[begin + j], config->coords[begin + j]); triangles[j] = inner; } rprimitives::gpu::Trimesh* mesh = new rprimitives::gpu::Trimesh(triangles, count); mesh->set_position(config->mesh_pos[i]); mesh->set_orientation(config->mesh_rot[i]); config->meshes[i] = mesh; } } struct LightConfig { rprimitives::gpu::Light** lights; rmath::Vec3<float>* point_light_pos; rmath::Vec3<float>* dir_light_dir; rmath::Vec4<float>* point_light_col; rmath::Vec4<float>* dir_light_col; int n_points; int n_directional; }; __global__ void build_lights(LightConfig* config) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < config->n_points + config->n_directional; i += stride) { rprimitives::gpu::Light* light; if (i < config->n_points) { rprimitives::gpu::PointLight* point_light = new rprimitives::gpu::PointLight(); point_light->set_color(config->point_light_col[i]); point_light->set_pos(config->point_light_pos[i]); light = point_light; } else { int j = i - config->n_points; rprimitives::gpu::DirLight* dir_light = new rprimitives::gpu::DirLight(); dir_light->set_color(config->dir_light_col[j]); dir_light->set_shine_dir(config->dir_light_dir[j]); light = dir_light; } config->lights[i] = light; } } renv::gpu::Scene* SceneBuilder::build_gpu_scene(renv::Canvas canvas, renv::Camera camera) { // load assets gputils::TextureBuffer4D<float> atlas = assets::gpu::read_png(atlas_path.c_str()); // flatten meshes std::vector<rmath::Vec3<int>> flattened_triangles{}; std::vector<rprimitives::TextureCoords> flattened_coords{}; std::vector<rprimitives::Material> flattened_mats{}; std::vector<rmath::Vec3<float>> flattened_mesh_pos{}; std::vector<rmath::Quat<float>> 
flattened_mesh_rot{}; std::vector<int> counts{}; for (MeshBuilder& b : this->meshes) { assert(b.triangles.size() == b.coords.size()); assert(b.triangles.size() == b.mats.size()); counts.push_back(b.triangles.size()); for (int i = 0; i < b.triangles.size(); i++) { flattened_triangles.push_back(b.triangles[i]); flattened_coords.push_back(b.coords[i]); flattened_mats.push_back(b.mats[i]); } flattened_mesh_pos.push_back(b.pos); flattened_mesh_rot.push_back(b.rot); } // build vertex buffer std::vector<rmath::Vec3<float>> normals = generate_normals(); rprimitives::gpu::VertexBuffer buffer{vertices, normals}; // build meshes thrust::inclusive_scan(counts.data(), counts.data() + counts.size(), counts.data()); int* ends = gputils::copy_to_gpu<int>(counts.data(), counts.size()); rprimitives::TextureCoords* dev_coords = gputils::copy_to_gpu<rprimitives::TextureCoords>(flattened_coords.data(), flattened_coords.size()); rprimitives::Material* dev_mats = gputils::copy_to_gpu<rprimitives::Material>(flattened_mats.data(), flattened_mats.size()); rmath::Vec3<int>* dev_tris = gputils::copy_to_gpu<rmath::Vec3<int>>(flattened_triangles.data(), flattened_triangles.size()); rmath::Vec3<float>* dev_mesh_pos = gputils::copy_to_gpu<rmath::Vec3<float>>(flattened_mesh_pos.data(), flattened_mesh_pos.size()); rmath::Quat<float>* dev_mesh_rot = gputils::copy_to_gpu<rmath::Quat<float>>(flattened_mesh_rot.data(), flattened_mesh_rot.size()); rprimitives::gpu::Trimesh** hitables = (rprimitives::gpu::Trimesh**) gputils::create_buffer(counts.size(), sizeof(rprimitives::gpu::Trimesh*)); int n_hitables = this->meshes.size(); MeshConfig mesh_config = { hitables, ends, dev_tris, dev_mats, dev_coords, dev_mesh_pos, dev_mesh_rot, (int) counts.size(), }; MeshConfig* mesh_config_ptr = gputils::copy_to_gpu(&mesh_config, 1); hipLaunchKernelGGL(( build_meshes), dim3(1), dim3(512), 0, 0, mesh_config_ptr); hipFree(mesh_config_ptr); hipFree(ends); hipFree(dev_coords); hipFree(dev_mats); hipFree(dev_tris); // 
create lights rmath::Vec3<float>* dev_point_light_pos = gputils::copy_to_gpu<rmath::Vec3<float>>(point_light_pos.data(), point_light_pos.size()); rmath::Vec3<float>* dev_dir_light_dir = gputils::copy_to_gpu<rmath::Vec3<float>>(dir_light_dir.data(), dir_light_dir.size()); rmath::Vec4<float>* dev_point_light_col = gputils::copy_to_gpu<rmath::Vec4<float>>(point_light_col.data(), point_light_col.size()); rmath::Vec4<float>* dev_dir_light_col = gputils::copy_to_gpu<rmath::Vec4<float>>(dir_light_col.data(), dir_light_col.size()); int n_point_lights = point_light_col.size(); int n_dir_lights = dir_light_col.size(); int n_lights = n_point_lights + n_dir_lights; rprimitives::gpu::Light** lights = (rprimitives::gpu::Light**) gputils::create_buffer(n_lights, sizeof(rprimitives::gpu::Light*)); LightConfig light_config = {lights, dev_point_light_pos, dev_dir_light_dir, dev_point_light_col, dev_dir_light_col, n_point_lights, n_dir_lights}; LightConfig* light_config_ptr = gputils::copy_to_gpu(&light_config, 1); hipLaunchKernelGGL(( build_lights), dim3(1), dim3(1024), 0, 0, light_config_ptr); hipFree(dev_point_light_pos); hipFree(dev_dir_light_dir); hipFree(dev_point_light_col); hipFree(dev_dir_light_col); hipFree(light_config_ptr); // copy transformations renv::Transformation* trans = gputils::copy_to_gpu(this->trans.data(), this->trans.size()); int n_trans = this->trans.size(); // build environment renv::Environment env{canvas, camera, trans, n_trans}; // configure local scene renv::gpu::Scene local_scene = renv::gpu::Scene{env, atlas, (rprimitives::gpu::Hitable**) hitables, n_hitables, lights, n_lights, buffer}; renv::gpu::Scene* s = (renv::gpu::Scene*) gputils::create_buffer(1, sizeof(renv::gpu::Scene)); hipMemcpy(s, &local_scene, sizeof(renv::gpu::Scene), hipMemcpyDefault); return s; } int SceneBuilder::build_cube(float scale, rprimitives::TextureCoords coords, rprimitives::Material mat) { /* e-----f * /| /| * a-----b | * | g---|-h * |/ |/ * c-----d */ const 
rmath::Vec3<float> _a{-0.5f, 0.5f, -0.5f}; const rmath::Vec3<float> _b{0.5f, 0.5f, -0.5f}; const rmath::Vec3<float> _c{-0.5f, -0.5f, -0.5f}; const rmath::Vec3<float> _d{0.5f, -0.5f, -0.5f}; const rmath::Vec3<float> _e{-0.5f, 0.5f, 0.5f}; const rmath::Vec3<float> _f{0.5f, 0.5f, 0.5f}; const rmath::Vec3<float> _g{-0.5f, -0.5f, 0.5f}; const rmath::Vec3<float> _h{0.5f, -0.5f, 0.5f}; rmath::Vec3<float> a = scale * _a; rmath::Vec3<float> b = scale * _b; rmath::Vec3<float> c = scale * _c; rmath::Vec3<float> d = scale * _d; rmath::Vec3<float> e = scale * _e; rmath::Vec3<float> f = scale * _f; rmath::Vec3<float> g = scale * _g; rmath::Vec3<float> h = scale * _h; int builder_idx = create_mesh(); MeshBuilder& builder = get_mesh_builder(builder_idx); // front builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(a), add_vertex(b)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(c), add_vertex(a), add_vertex(d)}, coords, mat); // top builder.add_triangle(rmath::Vec3<int>{add_vertex(a), add_vertex(e), add_vertex(b)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(e), add_vertex(f), add_vertex(b)}, coords, mat); // right builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(b), add_vertex(h)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(b), add_vertex(f), add_vertex(h)}, coords, mat); // left builder.add_triangle(rmath::Vec3<int>{add_vertex(c), add_vertex(g), add_vertex(a)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(a), add_vertex(g), add_vertex(e)}, coords, mat); // back builder.add_triangle(rmath::Vec3<int>{add_vertex(g), add_vertex(h), add_vertex(e)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(e), add_vertex(h), add_vertex(f)}, coords, mat); // bottom builder.add_triangle(rmath::Vec3<int>{add_vertex(g), add_vertex(c), add_vertex(d)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(h), add_vertex(g)}, coords, mat); return builder_idx; } }
35e70512fe55dbe75060c44c8fffdec216df2c51.cu
#include "scene_builder.h" #include <thrust/scan.h> #include "raymath/linear.h" #include "iostream" #include "rayenv/gpu/scene.h" #include "rayenv/gpu/scene.cuh" #include "rayprimitives/gpu/texture.cuh" #include "rayprimitives/gpu/phong.cuh" #include "rayprimitives/gpu/hitable.cuh" #include "rayprimitives/gpu/trimesh.cuh" #include "rayprimitives/gpu/light.cuh" #include "rayprimitives/material.h" #include "gputils/alloc.h" #include "assets.h" namespace rtracer { struct MeshConfig { rprimitives::gpu::Trimesh** meshes; int* ends; rmath::Vec3<int>* indices; rprimitives::Material* mats; rprimitives::TextureCoords* coords; rmath::Vec3<float>* mesh_pos; rmath::Quat<float>* mesh_rot; int n_meshes; }; __global__ void build_meshes(MeshConfig* config) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < config->n_meshes; i += stride) { int begin = i == 0 ? 0 : config->ends[i - 1]; int count = config->ends[i] - begin; rprimitives::gpu::TriInner* triangles = new rprimitives::gpu::TriInner[count]; for (int j = 0; j < count; j++) { rprimitives::gpu::TriInner inner = rprimitives::gpu::TriInner( config->indices[begin + j], config->mats[begin + j], config->coords[begin + j]); triangles[j] = inner; } rprimitives::gpu::Trimesh* mesh = new rprimitives::gpu::Trimesh(triangles, count); mesh->set_position(config->mesh_pos[i]); mesh->set_orientation(config->mesh_rot[i]); config->meshes[i] = mesh; } } struct LightConfig { rprimitives::gpu::Light** lights; rmath::Vec3<float>* point_light_pos; rmath::Vec3<float>* dir_light_dir; rmath::Vec4<float>* point_light_col; rmath::Vec4<float>* dir_light_col; int n_points; int n_directional; }; __global__ void build_lights(LightConfig* config) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < config->n_points + config->n_directional; i += stride) { rprimitives::gpu::Light* light; if (i < config->n_points) { rprimitives::gpu::PointLight* 
point_light = new rprimitives::gpu::PointLight(); point_light->set_color(config->point_light_col[i]); point_light->set_pos(config->point_light_pos[i]); light = point_light; } else { int j = i - config->n_points; rprimitives::gpu::DirLight* dir_light = new rprimitives::gpu::DirLight(); dir_light->set_color(config->dir_light_col[j]); dir_light->set_shine_dir(config->dir_light_dir[j]); light = dir_light; } config->lights[i] = light; } } renv::gpu::Scene* SceneBuilder::build_gpu_scene(renv::Canvas canvas, renv::Camera camera) { // load assets gputils::TextureBuffer4D<float> atlas = assets::gpu::read_png(atlas_path.c_str()); // flatten meshes std::vector<rmath::Vec3<int>> flattened_triangles{}; std::vector<rprimitives::TextureCoords> flattened_coords{}; std::vector<rprimitives::Material> flattened_mats{}; std::vector<rmath::Vec3<float>> flattened_mesh_pos{}; std::vector<rmath::Quat<float>> flattened_mesh_rot{}; std::vector<int> counts{}; for (MeshBuilder& b : this->meshes) { assert(b.triangles.size() == b.coords.size()); assert(b.triangles.size() == b.mats.size()); counts.push_back(b.triangles.size()); for (int i = 0; i < b.triangles.size(); i++) { flattened_triangles.push_back(b.triangles[i]); flattened_coords.push_back(b.coords[i]); flattened_mats.push_back(b.mats[i]); } flattened_mesh_pos.push_back(b.pos); flattened_mesh_rot.push_back(b.rot); } // build vertex buffer std::vector<rmath::Vec3<float>> normals = generate_normals(); rprimitives::gpu::VertexBuffer buffer{vertices, normals}; // build meshes thrust::inclusive_scan(counts.data(), counts.data() + counts.size(), counts.data()); int* ends = gputils::copy_to_gpu<int>(counts.data(), counts.size()); rprimitives::TextureCoords* dev_coords = gputils::copy_to_gpu<rprimitives::TextureCoords>(flattened_coords.data(), flattened_coords.size()); rprimitives::Material* dev_mats = gputils::copy_to_gpu<rprimitives::Material>(flattened_mats.data(), flattened_mats.size()); rmath::Vec3<int>* dev_tris = 
gputils::copy_to_gpu<rmath::Vec3<int>>(flattened_triangles.data(), flattened_triangles.size()); rmath::Vec3<float>* dev_mesh_pos = gputils::copy_to_gpu<rmath::Vec3<float>>(flattened_mesh_pos.data(), flattened_mesh_pos.size()); rmath::Quat<float>* dev_mesh_rot = gputils::copy_to_gpu<rmath::Quat<float>>(flattened_mesh_rot.data(), flattened_mesh_rot.size()); rprimitives::gpu::Trimesh** hitables = (rprimitives::gpu::Trimesh**) gputils::create_buffer(counts.size(), sizeof(rprimitives::gpu::Trimesh*)); int n_hitables = this->meshes.size(); MeshConfig mesh_config = { hitables, ends, dev_tris, dev_mats, dev_coords, dev_mesh_pos, dev_mesh_rot, (int) counts.size(), }; MeshConfig* mesh_config_ptr = gputils::copy_to_gpu(&mesh_config, 1); build_meshes<<<1, 512>>>(mesh_config_ptr); cudaFree(mesh_config_ptr); cudaFree(ends); cudaFree(dev_coords); cudaFree(dev_mats); cudaFree(dev_tris); // create lights rmath::Vec3<float>* dev_point_light_pos = gputils::copy_to_gpu<rmath::Vec3<float>>(point_light_pos.data(), point_light_pos.size()); rmath::Vec3<float>* dev_dir_light_dir = gputils::copy_to_gpu<rmath::Vec3<float>>(dir_light_dir.data(), dir_light_dir.size()); rmath::Vec4<float>* dev_point_light_col = gputils::copy_to_gpu<rmath::Vec4<float>>(point_light_col.data(), point_light_col.size()); rmath::Vec4<float>* dev_dir_light_col = gputils::copy_to_gpu<rmath::Vec4<float>>(dir_light_col.data(), dir_light_col.size()); int n_point_lights = point_light_col.size(); int n_dir_lights = dir_light_col.size(); int n_lights = n_point_lights + n_dir_lights; rprimitives::gpu::Light** lights = (rprimitives::gpu::Light**) gputils::create_buffer(n_lights, sizeof(rprimitives::gpu::Light*)); LightConfig light_config = {lights, dev_point_light_pos, dev_dir_light_dir, dev_point_light_col, dev_dir_light_col, n_point_lights, n_dir_lights}; LightConfig* light_config_ptr = gputils::copy_to_gpu(&light_config, 1); build_lights<<<1, 1024>>>(light_config_ptr); cudaFree(dev_point_light_pos); 
cudaFree(dev_dir_light_dir); cudaFree(dev_point_light_col); cudaFree(dev_dir_light_col); cudaFree(light_config_ptr); // copy transformations renv::Transformation* trans = gputils::copy_to_gpu(this->trans.data(), this->trans.size()); int n_trans = this->trans.size(); // build environment renv::Environment env{canvas, camera, trans, n_trans}; // configure local scene renv::gpu::Scene local_scene = renv::gpu::Scene{env, atlas, (rprimitives::gpu::Hitable**) hitables, n_hitables, lights, n_lights, buffer}; renv::gpu::Scene* s = (renv::gpu::Scene*) gputils::create_buffer(1, sizeof(renv::gpu::Scene)); cudaMemcpy(s, &local_scene, sizeof(renv::gpu::Scene), cudaMemcpyDefault); return s; } int SceneBuilder::build_cube(float scale, rprimitives::TextureCoords coords, rprimitives::Material mat) { /* e-----f * /| /| * a-----b | * | g---|-h * |/ |/ * c-----d */ const rmath::Vec3<float> _a{-0.5f, 0.5f, -0.5f}; const rmath::Vec3<float> _b{0.5f, 0.5f, -0.5f}; const rmath::Vec3<float> _c{-0.5f, -0.5f, -0.5f}; const rmath::Vec3<float> _d{0.5f, -0.5f, -0.5f}; const rmath::Vec3<float> _e{-0.5f, 0.5f, 0.5f}; const rmath::Vec3<float> _f{0.5f, 0.5f, 0.5f}; const rmath::Vec3<float> _g{-0.5f, -0.5f, 0.5f}; const rmath::Vec3<float> _h{0.5f, -0.5f, 0.5f}; rmath::Vec3<float> a = scale * _a; rmath::Vec3<float> b = scale * _b; rmath::Vec3<float> c = scale * _c; rmath::Vec3<float> d = scale * _d; rmath::Vec3<float> e = scale * _e; rmath::Vec3<float> f = scale * _f; rmath::Vec3<float> g = scale * _g; rmath::Vec3<float> h = scale * _h; int builder_idx = create_mesh(); MeshBuilder& builder = get_mesh_builder(builder_idx); // front builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(a), add_vertex(b)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(c), add_vertex(a), add_vertex(d)}, coords, mat); // top builder.add_triangle(rmath::Vec3<int>{add_vertex(a), add_vertex(e), add_vertex(b)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(e), add_vertex(f), 
add_vertex(b)}, coords, mat); // right builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(b), add_vertex(h)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(b), add_vertex(f), add_vertex(h)}, coords, mat); // left builder.add_triangle(rmath::Vec3<int>{add_vertex(c), add_vertex(g), add_vertex(a)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(a), add_vertex(g), add_vertex(e)}, coords, mat); // back builder.add_triangle(rmath::Vec3<int>{add_vertex(g), add_vertex(h), add_vertex(e)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(e), add_vertex(h), add_vertex(f)}, coords, mat); // bottom builder.add_triangle(rmath::Vec3<int>{add_vertex(g), add_vertex(c), add_vertex(d)}, coords, mat); builder.add_triangle(rmath::Vec3<int>{add_vertex(d), add_vertex(h), add_vertex(g)}, coords, mat); return builder_idx; } }
5ddfa1d4b0b4485b68dd67731da56360dfba7f17.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "linalg/eltwise2d.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveEltwise2DAddKernel(int rows, int cols, const Type *aPtr, const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < cols * rows) { const auto x = tid % cols; const auto y = tid / cols; const auto d = dPtr[tid]; const auto a = aPtr[y]; const auto b = bPtr[x]; Type accm = alpha * (a + b + d); if (beta) { accm += beta * cPtr[tid]; } dPtr[tid] = accm; } } template <typename Type> void naiveEltwise2DAdd(int rows, int cols, const Type *aPtr, const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { static const int TPB = 64; int nblks = ceildiv(rows * cols, TPB); hipLaunchKernelGGL(( naiveEltwise2DAddKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct Eltwise2dInputs { T tolerance; int w; int h; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const Eltwise2dInputs<T> &dims) { return os; } template <typename Type> void WrapperEltwise2d(int rows, int cols, const Type *aPtr, 
const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; }; eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0); } template <typename T> class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); auto w = params.w; auto h = params.h; auto len = w * h; allocate(in1, h); allocate(in2, w); allocate(out_ref, len); allocate(out, len); r.uniform(in1, h, T(-1.0), T(1.0)); r.uniform(in2, w, T(-1.0), T(1.0)); naiveEltwise2DAdd(h, w, in1, in2, out_ref, out_ref, (T)1, (T)1); WrapperEltwise2d<T>(h, w, in1, in2, out, out, (T)1, (T)1); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: Eltwise2dInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<Eltwise2dInputs<float>> inputsf2 = { {0.000001f, 1024, 1024, 1234ULL}}; const std::vector<Eltwise2dInputs<double>> inputsd2 = { {0.00000001, 1024, 1024, 1234ULL}}; typedef Eltwise2dTest<float> Eltwise2dTestF; TEST_P(Eltwise2dTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.w * params.h, CompareApprox<float>(params.tolerance))); } typedef Eltwise2dTest<double> Eltwise2dTestD; TEST_P(Eltwise2dTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.w * params.h, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
5ddfa1d4b0b4485b68dd67731da56360dfba7f17.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "linalg/eltwise2d.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveEltwise2DAddKernel(int rows, int cols, const Type *aPtr, const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < cols * rows) { const auto x = tid % cols; const auto y = tid / cols; const auto d = dPtr[tid]; const auto a = aPtr[y]; const auto b = bPtr[x]; Type accm = alpha * (a + b + d); if (beta) { accm += beta * cPtr[tid]; } dPtr[tid] = accm; } } template <typename Type> void naiveEltwise2DAdd(int rows, int cols, const Type *aPtr, const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { static const int TPB = 64; int nblks = ceildiv(rows * cols, TPB); naiveEltwise2DAddKernel<Type><<<nblks, TPB>>>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct Eltwise2dInputs { T tolerance; int w; int h; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const Eltwise2dInputs<T> &dims) { return os; } template <typename Type> void WrapperEltwise2d(int rows, int cols, const Type *aPtr, const Type *bPtr, const Type *cPtr, Type *dPtr, Type alpha, Type beta) { auto op_ = [] __device__(Type a, Type b, Type c) { 
return a + b + c; }; eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0); } template <typename T> class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); auto w = params.w; auto h = params.h; auto len = w * h; allocate(in1, h); allocate(in2, w); allocate(out_ref, len); allocate(out, len); r.uniform(in1, h, T(-1.0), T(1.0)); r.uniform(in2, w, T(-1.0), T(1.0)); naiveEltwise2DAdd(h, w, in1, in2, out_ref, out_ref, (T)1, (T)1); WrapperEltwise2d<T>(h, w, in1, in2, out, out, (T)1, (T)1); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: Eltwise2dInputs<T> params; T *in1, *in2, *out_ref, *out; }; const std::vector<Eltwise2dInputs<float>> inputsf2 = { {0.000001f, 1024, 1024, 1234ULL}}; const std::vector<Eltwise2dInputs<double>> inputsd2 = { {0.00000001, 1024, 1024, 1234ULL}}; typedef Eltwise2dTest<float> Eltwise2dTestF; TEST_P(Eltwise2dTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.w * params.h, CompareApprox<float>(params.tolerance))); } typedef Eltwise2dTest<double> Eltwise2dTestD; TEST_P(Eltwise2dTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.w * params.h, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
aadec9433c32531a14e65cd01fcab189a2b59fab.hip
// !!! This is a file automatically generated by hipify!!! #define _SIZE_T_DEFINED #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include <builtin_types.h> #include <vector_functions.h> #include <float.h> extern "C" { // DEVICE KERNELS __forceinline__ __device__ int GetId() { return blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; } // combine two vectors elemetwise with given weight, out = a * weightA + b * weightB __device__ void device_ElementwiseAdd_Weighted( float* a, float* b, float* out, float weightA, float weightB, int count ) { int id = GetId(); if (id < count) { out[id] = a[id] * weightA + b[id] * weightB; } } // GLOBAL KERNELS //Add scalar to each element of the input tensor __global__ void ScalarAdd( float* input, float scalar, float* output, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] + scalar; } } // O_i = scalar * B_i __global__ void ScalarMult( float * output, float * input, float scalar, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] * scalar; } } // O_i = scalar1 * B_i + scalar2 __global__ void ScalarMultThenAdd( float * output, float * input, float scalar1, float scalar2, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] * scalar1 + scalar2; } } __global__ void ElementwiseAbs( float* a, float* result, int count ) { int id = GetId(); if (id < count) { if (a[id] < 0) { result[id] = -a[id]; } else { result[id] = a[id]; } } } __global__ void ElementwiseThreshold( float* output, float* input, float threshold, int count ) { int id = GetId(); if (id < count) { if (input[id] < threshold) { output[id] = threshold; } else { output[id] = input[id]; } } } __global__ void ElementwiseAdd( float* a, float* b, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] + b[id]; } } ///Adds two vectores 
elementwise. bounding each element of the result __global__ void ElementwiseAdd_Bounded( float* a, float* b, float minBound, float maxBound, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] + b[id]; if (result[id] < minBound) { result[id] = minBound; } if (result[id] > maxBound) { result[id] = maxBound; } } } ///Adds two vectores elementwise result = weightA*a+weightB*b. bounding each element of the result __global__ void ElementwiseAdd_BoundedWeighted( float* result, float* a, float* b, float weightA, float weightB, float minBound, float maxBound, int count ) { int id = GetId(); if (id < count) { result[id] = weightA * a[id] + weightB * b[id]; if (result[id] < minBound) { result[id] = minBound; } if (result[id] > maxBound) { result[id] = maxBound; } } } __global__ void ElementwiseAdd_Offsetted( float* result, float* a, float* b, int resultOffset, // offset in the result int aOffset, // offset in a indexes (0 means no offset) int bOffset, // offset in b indexses int countSubtracted // number of values to be added ) { int id = GetId(); if (id < countSubtracted) { result[id + resultOffset] = a[id + aOffset] + b[id + bOffset]; } } // elementwise adition of two vectors, first vector is (expected to be shorter) used multiple times over and over. Vectors b and out should have the same length. __global__ void ElementwiseAdd_Segmented_Repeat( float* out, float* a, float* b, int lengthA, int count ) { int id = GetId(); int idA; if (id < count) { idA = id % lengthA; out[id] = a[idA] + b[id]; } } // elementwise addition of two vectors, each element of the vector a (it is expected to be shorter than b) is used multiple times over whole segment of the vector b. Vectors b and out should have the same length. 
__global__ void ElementwiseAdd_Segmented_Stretch( float* out, float* a, float* b, int noSegments, // = length A int count //length of the output and vector b ) { int id = GetId(); int segmentId; if (id < count) { segmentId = id / (count / noSegments); out[id] = a[segmentId] + b[id]; } } __global__ void ElementwiseAdd_Weighted( float* a, float* b, float* out, float weightA, float weightB, int count ) { device_ElementwiseAdd_Weighted(a, b, out, weightA, weightB, count); } // output is wa*a + wb*b, zero otherwise. a and b can be offsetted __global__ void ElementwiseAdd_WeightedOffsetted( float* a, float* b, float* out, float weightA, float weightB, int offsetA, int offsetB, int countA, int countB, int outputCount ) { int id = GetId(); if (id < outputCount) { out[id] = 0.0f; if (id >= offsetA && id < offsetA + countA) { out[id] += weightA * a[id - offsetA]; } if (id >= offsetB && id < offsetB + countB) { out[id] += weightB * b[id - offsetB]; } } } // output = a./b __global__ void ElementwiseDiv( float* a, float* b, float* output, int count ) { int id = GetId(); if (id < count) { output[id] = a[id] / b[id]; //will be NaN if b[id] == 0, which is ok, at least we will notice in observers } } // elementwise multiplication of probabilities __global__ void ElementwiseMult( float* a, float* b, float* out, int count ) { int id = GetId(); if (id < count) { out[id] = a[id] * b[id]; } } // elementwise multiplication of two vectors, first vector is (expected to be shorter) used multiple times over and over. Vectors b and out should have the same length. __global__ void ElementwiseMult_Segmented_Repeat( float* out, float* a, float* b, int lengthA, int count ) { int id = GetId(); int idA; if (id < count) { idA = id % lengthA; out[id] = a[idA] * b[id]; } } // elementwise multiplication of two vectors, each element of the vector a (it is expected to be shorter than b) is used multiple times over whole segment of the vector b. Vectors b and out should have the same length. 
__global__ void ElementwiseMult_Segmented_Stretch( float* out, float* a, float* b, int lengthA, // = noSegments int count //length of the output and vector b ) { int id = GetId(); int segmentId; if (id < count) { segmentId = id / (count / lengthA); out[id] = a[segmentId] * b[id]; } } __global__ void ElementwiseSub( float* a, float* b, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] - b[id]; } } //multiplies two vectors as matrices so that result is a matrix where output_ij = a_i * b_j __global__ void CrossMult( float* a, float* b, float* output, //matrix m * n, where m = length a and m = length b int lengthA, int lengthB ) { int id = GetId(); int i; int j; if (id < lengthA * lengthB) { i = id / lengthB; j = id % lengthB; output[id] = a[i] * b[j]; } } //multiplies each row from matrixA with each row from matrixB in a crossproduct manner, i.e., the two vectors are multiplied as matrices so that result is a matrix where output_ij = a_i * b_j. The overall result is then a tensor output_ijk, where k goes over rows in the matrices. __global__ void CrossMult_Segmented( float* output, //tensor noColumnsA * noColumnsB * noRows float* matrixA, float* matrixB, int noColumnsA, int noColumnsB, int noRows ) { int id = GetId(); int i, j, k; if (id < noColumnsA * noColumnsB * noRows) { i = id % noColumnsA; //columns in A j = (id / noColumnsA) % noColumnsB; //columns in B k = id / (noColumnsA * noColumnsB); //rows in both A and B, third dimension in the resulting tensor output[id] = matrixA[i + k * noColumnsA] * matrixB[j + k * noColumnsB]; } } __global__ void OtherAverage( float* a, float* b, float* result, int count ) { device_ElementwiseAdd_Weighted(a, b, result, 0.5f, 0.5f, count); } }
aadec9433c32531a14e65cd01fcab189a2b59fab.cu
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include <builtin_types.h> #include <vector_functions.h> #include <float.h> extern "C" { // DEVICE KERNELS __forceinline__ __device__ int GetId() { return blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; } // combine two vectors elemetwise with given weight, out = a * weightA + b * weightB __device__ void device_ElementwiseAdd_Weighted( float* a, float* b, float* out, float weightA, float weightB, int count ) { int id = GetId(); if (id < count) { out[id] = a[id] * weightA + b[id] * weightB; } } // GLOBAL KERNELS //Add scalar to each element of the input tensor __global__ void ScalarAdd( float* input, float scalar, float* output, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] + scalar; } } // O_i = scalar * B_i __global__ void ScalarMult( float * output, float * input, float scalar, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] * scalar; } } // O_i = scalar1 * B_i + scalar2 __global__ void ScalarMultThenAdd( float * output, float * input, float scalar1, float scalar2, int count ) { int id = GetId(); if (id < count) { output[id] = input[id] * scalar1 + scalar2; } } __global__ void ElementwiseAbs( float* a, float* result, int count ) { int id = GetId(); if (id < count) { if (a[id] < 0) { result[id] = -a[id]; } else { result[id] = a[id]; } } } __global__ void ElementwiseThreshold( float* output, float* input, float threshold, int count ) { int id = GetId(); if (id < count) { if (input[id] < threshold) { output[id] = threshold; } else { output[id] = input[id]; } } } __global__ void ElementwiseAdd( float* a, float* b, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] + b[id]; } } ///Adds two vectores elementwise. 
bounding each element of the result __global__ void ElementwiseAdd_Bounded( float* a, float* b, float minBound, float maxBound, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] + b[id]; if (result[id] < minBound) { result[id] = minBound; } if (result[id] > maxBound) { result[id] = maxBound; } } } ///Adds two vectores elementwise result = weightA*a+weightB*b. bounding each element of the result __global__ void ElementwiseAdd_BoundedWeighted( float* result, float* a, float* b, float weightA, float weightB, float minBound, float maxBound, int count ) { int id = GetId(); if (id < count) { result[id] = weightA * a[id] + weightB * b[id]; if (result[id] < minBound) { result[id] = minBound; } if (result[id] > maxBound) { result[id] = maxBound; } } } __global__ void ElementwiseAdd_Offsetted( float* result, float* a, float* b, int resultOffset, // offset in the result int aOffset, // offset in a indexes (0 means no offset) int bOffset, // offset in b indexses int countSubtracted // number of values to be added ) { int id = GetId(); if (id < countSubtracted) { result[id + resultOffset] = a[id + aOffset] + b[id + bOffset]; } } // elementwise adition of two vectors, first vector is (expected to be shorter) used multiple times over and over. Vectors b and out should have the same length. __global__ void ElementwiseAdd_Segmented_Repeat( float* out, float* a, float* b, int lengthA, int count ) { int id = GetId(); int idA; if (id < count) { idA = id % lengthA; out[id] = a[idA] + b[id]; } } // elementwise addition of two vectors, each element of the vector a (it is expected to be shorter than b) is used multiple times over whole segment of the vector b. Vectors b and out should have the same length. 
__global__ void ElementwiseAdd_Segmented_Stretch( float* out, float* a, float* b, int noSegments, // = length A int count //length of the output and vector b ) { int id = GetId(); int segmentId; if (id < count) { segmentId = id / (count / noSegments); out[id] = a[segmentId] + b[id]; } } __global__ void ElementwiseAdd_Weighted( float* a, float* b, float* out, float weightA, float weightB, int count ) { device_ElementwiseAdd_Weighted(a, b, out, weightA, weightB, count); } // output is wa*a + wb*b, zero otherwise. a and b can be offsetted __global__ void ElementwiseAdd_WeightedOffsetted( float* a, float* b, float* out, float weightA, float weightB, int offsetA, int offsetB, int countA, int countB, int outputCount ) { int id = GetId(); if (id < outputCount) { out[id] = 0.0f; if (id >= offsetA && id < offsetA + countA) { out[id] += weightA * a[id - offsetA]; } if (id >= offsetB && id < offsetB + countB) { out[id] += weightB * b[id - offsetB]; } } } // output = a./b __global__ void ElementwiseDiv( float* a, float* b, float* output, int count ) { int id = GetId(); if (id < count) { output[id] = a[id] / b[id]; //will be NaN if b[id] == 0, which is ok, at least we will notice in observers } } // elementwise multiplication of probabilities __global__ void ElementwiseMult( float* a, float* b, float* out, int count ) { int id = GetId(); if (id < count) { out[id] = a[id] * b[id]; } } // elementwise multiplication of two vectors, first vector is (expected to be shorter) used multiple times over and over. Vectors b and out should have the same length. __global__ void ElementwiseMult_Segmented_Repeat( float* out, float* a, float* b, int lengthA, int count ) { int id = GetId(); int idA; if (id < count) { idA = id % lengthA; out[id] = a[idA] * b[id]; } } // elementwise multiplication of two vectors, each element of the vector a (it is expected to be shorter than b) is used multiple times over whole segment of the vector b. Vectors b and out should have the same length. 
__global__ void ElementwiseMult_Segmented_Stretch( float* out, float* a, float* b, int lengthA, // = noSegments int count //length of the output and vector b ) { int id = GetId(); int segmentId; if (id < count) { segmentId = id / (count / lengthA); out[id] = a[segmentId] * b[id]; } } __global__ void ElementwiseSub( float* a, float* b, float* result, int count ) { int id = GetId(); if (id < count) { result[id] = a[id] - b[id]; } } //multiplies two vectors as matrices so that result is a matrix where output_ij = a_i * b_j __global__ void CrossMult( float* a, float* b, float* output, //matrix m * n, where m = length a and m = length b int lengthA, int lengthB ) { int id = GetId(); int i; int j; if (id < lengthA * lengthB) { i = id / lengthB; j = id % lengthB; output[id] = a[i] * b[j]; } } //multiplies each row from matrixA with each row from matrixB in a crossproduct manner, i.e., the two vectors are multiplied as matrices so that result is a matrix where output_ij = a_i * b_j. The overall result is then a tensor output_ijk, where k goes over rows in the matrices. __global__ void CrossMult_Segmented( float* output, //tensor noColumnsA * noColumnsB * noRows float* matrixA, float* matrixB, int noColumnsA, int noColumnsB, int noRows ) { int id = GetId(); int i, j, k; if (id < noColumnsA * noColumnsB * noRows) { i = id % noColumnsA; //columns in A j = (id / noColumnsA) % noColumnsB; //columns in B k = id / (noColumnsA * noColumnsB); //rows in both A and B, third dimension in the resulting tensor output[id] = matrixA[i + k * noColumnsA] * matrixB[j + k * noColumnsB]; } } __global__ void OtherAverage( float* a, float* b, float* result, int count ) { device_ElementwiseAdd_Weighted(a, b, result, 0.5f, 0.5f, count); } }
e2b01c08153c51afc409ed089d4fab02aaf0695a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <ctime> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <string> #define N 1111111 #define B 1 #define MESH 5555555 __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0], y1=xyz[(i*n+j)*3+1], z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1, y2=buf[k*3+1]-y1, z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2;} } { float x2=buf[k*3+3]-x1, y2=buf[k*3+4]-y1, z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1;} } { float x2=buf[k*3+6]-x1, y2=buf[k*3+7]-y1, z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2;} } { float x2=buf[k*3+9]-x1, y2=buf[k*3+10]-y1, z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3;} } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1, y2=buf[k*3+1]-y1, z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2;} } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } void chamfer_cuda_forward(int b, int n, float * xyz1, int m, float * xyz2, float * dist1, int * idx1,float * dist2, int * idx2, hipStream_t stream){ hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b, n, xyz1, m, xyz2, dist1, idx1); hipDeviceSynchronize(); hipLaunchKernelGGL(( NmDistanceKernel), 
dim3(dim3(32,16,1)),dim3(512), 0, 0, b, m, xyz2, n, xyz1, dist2, idx2); hipDeviceSynchronize(); return ; } float xyz1[B][N][3], xyz2[B][N][3]; float dist1[B][N], dist2[B][N]; int idx1[B][N], idx2[B][N]; float *xyz1_gpu, *xyz2_gpu, *dist1_gpu, *dist2_gpu; int *idx1_gpu, *idx2_gpu; struct Point { double x, y, z; Point() {}; Point (double _x, double _y, double _z) { x = _x; y = _y; z = _z; }; Point operator - (const Point& v) const { return Point(x - v.x, y - v.y, z - v.z);} Point operator + (const Point& v) const { return Point(x + v.x, y + v.y, z + v.z);} Point operator * (const double t) const { return Point(x * t, y * t, z * t);} double length() { return sqrt(x * x + y * y + z * z);} void normalize() { double l = length(); x /= l; y /= l; z /= l;} float dot(const Point& v) const { return x * v.x + y * v.y + z * v.z;} Point cross(const Point& v) const { return Point( y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x);} }vertices1[MESH], vertices2[MESH], normal1[MESH], normal2[MESH]; struct Face { int a, b, c; double s; Face() {}; Face (int _a, int _b, int _c) { a = _a; b = _b; c = _c; }; }faces1[MESH], faces2[MESH]; int n_vertices_1, n_vertices_2, n_faces_1, n_faces_2; int n = 0, m = 0; int resolution = 1000000; Point randomPointTriangle(Point a, Point b, Point c) { double r1 = (double) rand() / RAND_MAX; double r2 = (double) rand() / RAND_MAX; double r1sqr = std::sqrt(r1); double OneMinR1Sqr = (1 - r1sqr); double OneMinR2 = (1 - r2); a = a * OneMinR1Sqr; b = b * OneMinR2; return (c * r2 + b) * r1sqr + a; } int main(int argc, char ** argv) { std::string mesh1_file = argv[1]; std::string mesh2_file = argv[2]; std::string model_id = argv[3]; freopen(mesh1_file.c_str(), "r", stdin); scanf("%d%d", &n_vertices_1, &n_faces_1); for (int i = 0; i < n_vertices_1; i++) { double x, y, z; scanf("%lf %lf %lf", &x, &y, &z); vertices1[i] = Point(x, y, z); } double sum_area = 0; for (int i = 0; i < n_faces_1; i++) { int _, a, b, c; scanf("%d %d %d %d", &_, &a, &b, &c); 
faces1[i] = Face(a, b, c); faces1[i].s = (vertices1[c] - vertices1[a]).cross((vertices1[b] - vertices1[a])).length() / 2; if (std::isnan(faces1[i].s)) faces1[i].s=0; sum_area += faces1[i].s; } for (int i = 0; i < n_faces_1; i++) { int a = faces1[i].a, b = faces1[i].b, c = faces1[i].c; int t = round(resolution * (faces1[i].s / sum_area)); Point normal = (vertices1[c] - vertices1[a]).cross(vertices1[b] - vertices1[a]); normal.normalize(); for (int j = 0; j < t; j++) { Point p = randomPointTriangle(vertices1[a], vertices1[b], vertices1[c]); xyz1[0][n][0] = p.x; xyz1[0][n][1] = p.y; xyz1[0][n][2] = p.z; normal1[n] = normal; n++; } } freopen(mesh2_file.c_str(), "r", stdin); scanf("%d%d", &n_vertices_2, &n_faces_2); for (int i = 0; i < n_vertices_2; i++) { double x, y, z; scanf("%lf %lf %lf", &x, &y, &z); vertices2[i] = Point(x, y, z); } sum_area = 0; for (int i = 0; i < n_faces_2; i++) { int _, a, b, c; scanf("%d %d %d %d", &_, &a, &b, &c); faces2[i] = Face(a, b, c); faces2[i].s = (vertices2[c] - vertices2[a]).cross((vertices2[b] - vertices2[a])).length() / 2; sum_area += faces2[i].s; } for (int i = 0; i < n_faces_2; i++) { int a = faces2[i].a, b = faces2[i].b, c = faces2[i].c; int t = round(resolution * (faces2[i].s / sum_area)); Point normal = (vertices2[c] - vertices2[a]).cross(vertices2[b] - vertices2[a]); normal.normalize(); for (int j = 0; j < t; j++) { Point p = randomPointTriangle(vertices2[a], vertices2[b], vertices2[c]); xyz2[0][m][0] = p.x; xyz2[0][m][1] = p.y; xyz2[0][m][2] = p.z; normal2[m] = normal; m++; } } size_t xyz_size = max(n, m) * 3 * sizeof(float); size_t dis_size = max(n, m) * sizeof(float); size_t idx_size = max(n, m) * sizeof(int); hipMalloc((void **) &xyz1_gpu, xyz_size); hipMalloc((void **) &xyz2_gpu, xyz_size); hipMalloc((void **) &dist1_gpu, dis_size); hipMalloc((void **) &dist2_gpu, dis_size); hipMalloc((void **) &idx1_gpu, idx_size); hipMalloc((void **) &idx2_gpu, idx_size); hipMemcpy(xyz1_gpu, &xyz1[0][0], xyz_size, 
hipMemcpyHostToDevice); hipMemcpy(xyz2_gpu, &xyz2[0][0], xyz_size, hipMemcpyHostToDevice); chamfer_cuda_forward(1, n, xyz1_gpu, m, xyz2_gpu, dist1_gpu, idx1_gpu, dist2_gpu, idx2_gpu, NULL); hipMemcpy(&dist1[0][0], dist1_gpu, dis_size, hipMemcpyDeviceToHost); hipMemcpy(&dist2[0][0], dist2_gpu, dis_size, hipMemcpyDeviceToHost); hipMemcpy(&idx1[0][0], idx1_gpu, idx_size, hipMemcpyDeviceToHost); hipMemcpy(&idx2[0][0], idx2_gpu, idx_size, hipMemcpyDeviceToHost); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in nnd updateOutput: %s\n", hipGetErrorString(err)); return 0; } double sum = 0; double sum_normal = 0; // normal consistency for (int i = 0; i < n; i++) { sum_normal += abs(normal1[i].dot(normal2[idx1[0][i]])); } for (int i = 0; i < m; i++) { sum_normal += abs(normal2[i].dot(normal1[idx2[0][i]])); } // f-score for different threshold for (int k = 0; k <= 40; k++) { double threashold = sqrt(sum_area / resolution) * (1.0 + (double)k / 20); int cnt1 = n, cnt2 = m; for (int i = 0; i < n; i++) { double d = sqrt(dist1[0][i]); if (d > threashold) cnt1--; if (k == 0) sum += d; } for (int i = 0; i < m; i++) { double d = sqrt(dist2[0][i]); if (d > threashold) cnt2--; if (k == 0) sum += d; } double t1 = (double) cnt1 / n; double t2 = (double) cnt2 / m; double f1 = 2 * t1 * t2 / (t1 + t2 + 1e-9); printf("%lf ", f1); } // chamfer distance & normal consistency printf("%lf %lf %s\n", sum / (n + m), sum_normal / (n + m), model_id.c_str()); return 0; }
e2b01c08153c51afc409ed089d4fab02aaf0695a.cu
#include <stdio.h> #include <ctime> #include <cstdio> #include <cstdlib> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <string> #define N 1111111 #define B 1 #define MESH 5555555 __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ const int batch=512; __shared__ float buf[batch*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int k2=0;k2<m;k2+=batch){ int end_k=min(m,k2+batch)-k2; for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){ buf[j]=xyz2[(i*m+k2)*3+j]; } __syncthreads(); for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz[(i*n+j)*3+0], y1=xyz[(i*n+j)*3+1], z1=xyz[(i*n+j)*3+2]; int best_i=0; float best=0; int end_ka=end_k-(end_k&3); for (int k=0;k<end_ka;k+=4){ { float x2=buf[k*3+0]-x1, y2=buf[k*3+1]-y1, z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2;} } { float x2=buf[k*3+3]-x1, y2=buf[k*3+4]-y1, z2=buf[k*3+5]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+1;} } { float x2=buf[k*3+6]-x1, y2=buf[k*3+7]-y1, z2=buf[k*3+8]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+2;} } { float x2=buf[k*3+9]-x1, y2=buf[k*3+10]-y1, z2=buf[k*3+11]-z1; float d=x2*x2+y2*y2+z2*z2; if (d<best){ best=d; best_i=k+k2+3;} } } for (int k=end_ka;k<end_k;k++){ float x2=buf[k*3+0]-x1, y2=buf[k*3+1]-y1, z2=buf[k*3+2]-z1; float d=x2*x2+y2*y2+z2*z2; if (k==0 || d<best){ best=d; best_i=k+k2;} } if (k2==0 || result[(i*n+j)]>best){ result[(i*n+j)]=best; result_i[(i*n+j)]=best_i; } } __syncthreads(); } } } void chamfer_cuda_forward(int b, int n, float * xyz1, int m, float * xyz2, float * dist1, int * idx1,float * dist2, int * idx2, cudaStream_t stream){ NmDistanceKernel<<<dim3(32,16,1),512>>>(b, n, xyz1, m, xyz2, dist1, idx1); cudaDeviceSynchronize(); NmDistanceKernel<<<dim3(32,16,1),512>>>(b, m, xyz2, n, xyz1, dist2, idx2); cudaDeviceSynchronize(); return ; } float xyz1[B][N][3], xyz2[B][N][3]; 
float dist1[B][N], dist2[B][N]; int idx1[B][N], idx2[B][N]; float *xyz1_gpu, *xyz2_gpu, *dist1_gpu, *dist2_gpu; int *idx1_gpu, *idx2_gpu; struct Point { double x, y, z; Point() {}; Point (double _x, double _y, double _z) { x = _x; y = _y; z = _z; }; Point operator - (const Point& v) const { return Point(x - v.x, y - v.y, z - v.z);} Point operator + (const Point& v) const { return Point(x + v.x, y + v.y, z + v.z);} Point operator * (const double t) const { return Point(x * t, y * t, z * t);} double length() { return sqrt(x * x + y * y + z * z);} void normalize() { double l = length(); x /= l; y /= l; z /= l;} float dot(const Point& v) const { return x * v.x + y * v.y + z * v.z;} Point cross(const Point& v) const { return Point( y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x);} }vertices1[MESH], vertices2[MESH], normal1[MESH], normal2[MESH]; struct Face { int a, b, c; double s; Face() {}; Face (int _a, int _b, int _c) { a = _a; b = _b; c = _c; }; }faces1[MESH], faces2[MESH]; int n_vertices_1, n_vertices_2, n_faces_1, n_faces_2; int n = 0, m = 0; int resolution = 1000000; Point randomPointTriangle(Point a, Point b, Point c) { double r1 = (double) rand() / RAND_MAX; double r2 = (double) rand() / RAND_MAX; double r1sqr = std::sqrt(r1); double OneMinR1Sqr = (1 - r1sqr); double OneMinR2 = (1 - r2); a = a * OneMinR1Sqr; b = b * OneMinR2; return (c * r2 + b) * r1sqr + a; } int main(int argc, char ** argv) { std::string mesh1_file = argv[1]; std::string mesh2_file = argv[2]; std::string model_id = argv[3]; freopen(mesh1_file.c_str(), "r", stdin); scanf("%d%d", &n_vertices_1, &n_faces_1); for (int i = 0; i < n_vertices_1; i++) { double x, y, z; scanf("%lf %lf %lf", &x, &y, &z); vertices1[i] = Point(x, y, z); } double sum_area = 0; for (int i = 0; i < n_faces_1; i++) { int _, a, b, c; scanf("%d %d %d %d", &_, &a, &b, &c); faces1[i] = Face(a, b, c); faces1[i].s = (vertices1[c] - vertices1[a]).cross((vertices1[b] - vertices1[a])).length() / 2; if 
(std::isnan(faces1[i].s)) faces1[i].s=0; sum_area += faces1[i].s; } for (int i = 0; i < n_faces_1; i++) { int a = faces1[i].a, b = faces1[i].b, c = faces1[i].c; int t = round(resolution * (faces1[i].s / sum_area)); Point normal = (vertices1[c] - vertices1[a]).cross(vertices1[b] - vertices1[a]); normal.normalize(); for (int j = 0; j < t; j++) { Point p = randomPointTriangle(vertices1[a], vertices1[b], vertices1[c]); xyz1[0][n][0] = p.x; xyz1[0][n][1] = p.y; xyz1[0][n][2] = p.z; normal1[n] = normal; n++; } } freopen(mesh2_file.c_str(), "r", stdin); scanf("%d%d", &n_vertices_2, &n_faces_2); for (int i = 0; i < n_vertices_2; i++) { double x, y, z; scanf("%lf %lf %lf", &x, &y, &z); vertices2[i] = Point(x, y, z); } sum_area = 0; for (int i = 0; i < n_faces_2; i++) { int _, a, b, c; scanf("%d %d %d %d", &_, &a, &b, &c); faces2[i] = Face(a, b, c); faces2[i].s = (vertices2[c] - vertices2[a]).cross((vertices2[b] - vertices2[a])).length() / 2; sum_area += faces2[i].s; } for (int i = 0; i < n_faces_2; i++) { int a = faces2[i].a, b = faces2[i].b, c = faces2[i].c; int t = round(resolution * (faces2[i].s / sum_area)); Point normal = (vertices2[c] - vertices2[a]).cross(vertices2[b] - vertices2[a]); normal.normalize(); for (int j = 0; j < t; j++) { Point p = randomPointTriangle(vertices2[a], vertices2[b], vertices2[c]); xyz2[0][m][0] = p.x; xyz2[0][m][1] = p.y; xyz2[0][m][2] = p.z; normal2[m] = normal; m++; } } size_t xyz_size = max(n, m) * 3 * sizeof(float); size_t dis_size = max(n, m) * sizeof(float); size_t idx_size = max(n, m) * sizeof(int); cudaMalloc((void **) &xyz1_gpu, xyz_size); cudaMalloc((void **) &xyz2_gpu, xyz_size); cudaMalloc((void **) &dist1_gpu, dis_size); cudaMalloc((void **) &dist2_gpu, dis_size); cudaMalloc((void **) &idx1_gpu, idx_size); cudaMalloc((void **) &idx2_gpu, idx_size); cudaMemcpy(xyz1_gpu, &xyz1[0][0], xyz_size, cudaMemcpyHostToDevice); cudaMemcpy(xyz2_gpu, &xyz2[0][0], xyz_size, cudaMemcpyHostToDevice); chamfer_cuda_forward(1, n, xyz1_gpu, m, 
xyz2_gpu, dist1_gpu, idx1_gpu, dist2_gpu, idx2_gpu, NULL); cudaMemcpy(&dist1[0][0], dist1_gpu, dis_size, cudaMemcpyDeviceToHost); cudaMemcpy(&dist2[0][0], dist2_gpu, dis_size, cudaMemcpyDeviceToHost); cudaMemcpy(&idx1[0][0], idx1_gpu, idx_size, cudaMemcpyDeviceToHost); cudaMemcpy(&idx2[0][0], idx2_gpu, idx_size, cudaMemcpyDeviceToHost); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); return 0; } double sum = 0; double sum_normal = 0; // normal consistency for (int i = 0; i < n; i++) { sum_normal += abs(normal1[i].dot(normal2[idx1[0][i]])); } for (int i = 0; i < m; i++) { sum_normal += abs(normal2[i].dot(normal1[idx2[0][i]])); } // f-score for different threshold for (int k = 0; k <= 40; k++) { double threashold = sqrt(sum_area / resolution) * (1.0 + (double)k / 20); int cnt1 = n, cnt2 = m; for (int i = 0; i < n; i++) { double d = sqrt(dist1[0][i]); if (d > threashold) cnt1--; if (k == 0) sum += d; } for (int i = 0; i < m; i++) { double d = sqrt(dist2[0][i]); if (d > threashold) cnt2--; if (k == 0) sum += d; } double t1 = (double) cnt1 / n; double t2 = (double) cnt2 / m; double f1 = 2 * t1 * t2 / (t1 + t2 + 1e-9); printf("%lf ", f1); } // chamfer distance & normal consistency printf("%lf %lf %s\n", sum / (n + m), sum_normal / (n + m), model_id.c_str()); return 0; }
9b0d6c59a448357fb88cd109b34d3433880ba146.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2017-2023 by XGBoost contributors */ #include <thrust/fill.h> #include <thrust/device_ptr.h> #include <algorithm> #include <cstdint> #include <mutex> #include "xgboost/data.h" #include "xgboost/host_device_vector.h" #include "xgboost/tree_model.h" #include "device_helpers_hip.cuh" namespace xgboost { // the handler to call instead of hipSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } template <typename T> class HostDeviceVectorImpl { public: HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(size, v); } else { data_h_.resize(size, v); } } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; LazyResizeDevice(init.size()); Copy(init); } else { data_h_ = init; } } HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) : device_{that.device_}, data_h_{std::move(that.data_h_)}, data_d_{std::move(that.data_d_)}, gpu_access_{that.gpu_access_} {} ~HostDeviceVectorImpl() { if (device_ >= 0) { SetDevice(); } } size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_ ? 
data_d_->size() : 0; } int DeviceIdx() const { return device_; } T* DevicePointer() { LazySyncDevice(GPUAccess::kWrite); return data_d_->data().get(); } const T* ConstDevicePointer() { LazySyncDevice(GPUAccess::kRead); return data_d_->data().get(); } common::Span<T> DeviceSpan() { LazySyncDevice(GPUAccess::kWrite); return {data_d_->data().get(), Size()}; } common::Span<const T> ConstDeviceSpan() { LazySyncDevice(GPUAccess::kRead); return {data_d_->data().get(), Size()}; } void Fill(T v) { // NOLINT if (HostCanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { gpu_access_ = GPUAccess::kWrite; SetDevice(); auto s_data = dh::ToSpan(*data_d_); dh::LaunchN(data_d_->size(), [=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); SetDevice(other->device_); // Data is on host. if (HostCanWrite() && other->HostCanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } SetDevice(); CopyToDevice(other); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.data()); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.begin()); } } void Extend(HostDeviceVectorImpl* other) { auto ori_size = this->Size(); this->Resize(ori_size + other->Size(), T()); if (HostCanWrite() && other->HostCanRead()) { auto& h_vec = this->HostVector(); auto& other_vec = other->HostVector(); CHECK_EQ(h_vec.size(), ori_size + other->Size()); std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size); } else { auto ptr = other->ConstDevicePointer(); SetDevice(); CHECK_EQ(this->DeviceIdx(), other->DeviceIdx()); dh::safe_cuda(hipMemcpyAsync(this->DevicePointer() + ori_size, ptr, other->Size() * sizeof(T), 
hipMemcpyDeviceToDevice)); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kNone); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void SetDevice(int device) { if (device_ == device) { return; } if (device_ >= 0) { LazySyncHost(GPUAccess::kNone); } if (device_ >= 0 && device >= 0) { CHECK_EQ(device_, device) << "New device ordinal is different from previous one."; } device_ = device; if (device_ >= 0) { LazyResizeDevice(data_h_.size()); } } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) { // fast on-device resize gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(new_size, v); } else { // resize on host LazySyncHost(GPUAccess::kNone); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (HostCanAccess(access)) { return; } if (HostCanRead()) { // data is present, just need to deny access to the device gpu_access_ = access; return; } gpu_access_ = access; if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); } SetDevice(); dh::safe_cuda(hipMemcpy(data_h_.data(), data_d_->data().get(), data_d_->size() * sizeof(T), hipMemcpyDeviceToHost)); } void LazySyncDevice(GPUAccess access) { if (DeviceCanAccess(access)) { return; } if (DeviceCanRead()) { // deny read to the host gpu_access_ = access; return; } // data is on the host LazyResizeDevice(data_h_.size()); SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), data_h_.data(), data_d_->size() * sizeof(T), hipMemcpyHostToDevice)); gpu_access_ = access; } bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; } bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); } bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); } bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; } bool DeviceCanRead() const { return 
DeviceCanAccess(GPUAccess::kRead); } bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); } GPUAccess Access() const { return gpu_access_; } private: int device_{-1}; std::vector<T> data_h_{}; std::unique_ptr<dh::device_vector<T>> data_d_{}; GPUAccess gpu_access_{GPUAccess::kNone}; void CopyToDevice(HostDeviceVectorImpl* other) { if (other->HostCanWrite()) { CopyToDevice(other->data_h_.data()); } else { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(), data_d_->size() * sizeof(T), hipMemcpyDefault)); } } void CopyToDevice(const T* begin) { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), begin, data_d_->size() * sizeof(T), hipMemcpyDefault)); } void LazyResizeDevice(size_t new_size) { if (data_d_ && new_size == data_d_->size()) { return; } SetDevice(); data_d_->resize(new_size); } void SetDevice() { CHECK_GE(device_, 0); if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(hipSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } if (!data_d_) { data_d_.reset(new dh::device_vector<T>); } } }; template<typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(new HostDeviceVectorImpl<T>(size, v, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other) : impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {} template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) { if (this == &other) { return *this; } 
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl( new HostDeviceVectorImpl<T>(std::move(*other.impl_))); delete impl_; impl_ = new_impl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer() { return impl_->DevicePointer(); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer() const { return impl_->ConstDevicePointer(); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan() { return impl_->DeviceSpan(); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const { return impl_->ConstDeviceSpan(); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Extend(HostDeviceVector const& other) { impl_->Extend(other.impl_); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanRead() const { return impl_->HostCanRead(); } template <typename T> bool HostDeviceVector<T>::HostCanWrite() const { return impl_->HostCanWrite(); } template <typename T> bool HostDeviceVector<T>::DeviceCanRead() const { return impl_->DeviceCanRead(); } template <typename T> bool 
HostDeviceVector<T>::DeviceCanWrite() const { return impl_->DeviceCanWrite(); } template <typename T> GPUAccess HostDeviceVector<T>::DeviceAccess() const { return impl_->Access(); } template <typename T> void HostDeviceVector<T>::SetDevice(int device) const { impl_->SetDevice(device); } template <typename T> void HostDeviceVector<T>::SetDevice(DeviceOrd device) const { impl_->SetDevice(device.ordinal); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<double>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<GradientPairPrecise>; template class HostDeviceVector<int32_t>; // bst_node_t template class HostDeviceVector<uint8_t>; template class HostDeviceVector<FeatureType>; template class HostDeviceVector<Entry>; template class HostDeviceVector<uint64_t>; // bst_row_t template class HostDeviceVector<uint32_t>; // bst_feature_t template class HostDeviceVector<RegTree::Node>; template class HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>; template class HostDeviceVector<RTreeNodeStat>; #if defined(__APPLE__) /* * On OSX: * * typedef unsigned int uint32_t; * typedef unsigned long long uint64_t; * typedef unsigned long __darwin_size_t; */ template class HostDeviceVector<std::size_t>; #endif // defined(__APPLE__) } // namespace xgboost
9b0d6c59a448357fb88cd109b34d3433880ba146.cu
/** * Copyright 2017-2023 by XGBoost contributors */ #include <thrust/fill.h> #include <thrust/device_ptr.h> #include <algorithm> #include <cstdint> #include <mutex> #include "xgboost/data.h" #include "xgboost/host_device_vector.h" #include "xgboost/tree_model.h" #include "device_helpers.cuh" namespace xgboost { // the handler to call instead of cudaSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } template <typename T> class HostDeviceVectorImpl { public: HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(size, v); } else { data_h_.resize(size, v); } } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) { if (device >= 0) { gpu_access_ = GPUAccess::kWrite; LazyResizeDevice(init.size()); Copy(init); } else { data_h_ = init; } } HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) : device_{that.device_}, data_h_{std::move(that.data_h_)}, data_d_{std::move(that.data_d_)}, gpu_access_{that.gpu_access_} {} ~HostDeviceVectorImpl() { if (device_ >= 0) { SetDevice(); } } size_t Size() const { return HostCanRead() ? data_h_.size() : data_d_ ? 
data_d_->size() : 0; } int DeviceIdx() const { return device_; } T* DevicePointer() { LazySyncDevice(GPUAccess::kWrite); return data_d_->data().get(); } const T* ConstDevicePointer() { LazySyncDevice(GPUAccess::kRead); return data_d_->data().get(); } common::Span<T> DeviceSpan() { LazySyncDevice(GPUAccess::kWrite); return {data_d_->data().get(), Size()}; } common::Span<const T> ConstDeviceSpan() { LazySyncDevice(GPUAccess::kRead); return {data_d_->data().get(), Size()}; } void Fill(T v) { // NOLINT if (HostCanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { gpu_access_ = GPUAccess::kWrite; SetDevice(); auto s_data = dh::ToSpan(*data_d_); dh::LaunchN(data_d_->size(), [=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); SetDevice(other->device_); // Data is on host. if (HostCanWrite() && other->HostCanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } SetDevice(); CopyToDevice(other); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.data()); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (HostCanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { CopyToDevice(other.begin()); } } void Extend(HostDeviceVectorImpl* other) { auto ori_size = this->Size(); this->Resize(ori_size + other->Size(), T()); if (HostCanWrite() && other->HostCanRead()) { auto& h_vec = this->HostVector(); auto& other_vec = other->HostVector(); CHECK_EQ(h_vec.size(), ori_size + other->Size()); std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size); } else { auto ptr = other->ConstDevicePointer(); SetDevice(); CHECK_EQ(this->DeviceIdx(), other->DeviceIdx()); dh::safe_cuda(cudaMemcpyAsync(this->DevicePointer() + ori_size, ptr, other->Size() * sizeof(T), 
cudaMemcpyDeviceToDevice)); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kNone); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void SetDevice(int device) { if (device_ == device) { return; } if (device_ >= 0) { LazySyncHost(GPUAccess::kNone); } if (device_ >= 0 && device >= 0) { CHECK_EQ(device_, device) << "New device ordinal is different from previous one."; } device_ = device; if (device_ >= 0) { LazyResizeDevice(data_h_.size()); } } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) { // fast on-device resize gpu_access_ = GPUAccess::kWrite; SetDevice(); data_d_->resize(new_size, v); } else { // resize on host LazySyncHost(GPUAccess::kNone); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (HostCanAccess(access)) { return; } if (HostCanRead()) { // data is present, just need to deny access to the device gpu_access_ = access; return; } gpu_access_ = access; if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); } SetDevice(); dh::safe_cuda(cudaMemcpy(data_h_.data(), data_d_->data().get(), data_d_->size() * sizeof(T), cudaMemcpyDeviceToHost)); } void LazySyncDevice(GPUAccess access) { if (DeviceCanAccess(access)) { return; } if (DeviceCanRead()) { // deny read to the host gpu_access_ = access; return; } // data is on the host LazyResizeDevice(data_h_.size()); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), data_h_.data(), data_d_->size() * sizeof(T), cudaMemcpyHostToDevice)); gpu_access_ = access; } bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; } bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); } bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); } bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; } bool DeviceCanRead() const { return 
DeviceCanAccess(GPUAccess::kRead); } bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); } GPUAccess Access() const { return gpu_access_; } private: int device_{-1}; std::vector<T> data_h_{}; std::unique_ptr<dh::device_vector<T>> data_d_{}; GPUAccess gpu_access_{GPUAccess::kNone}; void CopyToDevice(HostDeviceVectorImpl* other) { if (other->HostCanWrite()) { CopyToDevice(other->data_h_.data()); } else { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(), data_d_->size() * sizeof(T), cudaMemcpyDefault)); } } void CopyToDevice(const T* begin) { LazyResizeDevice(Size()); gpu_access_ = GPUAccess::kWrite; SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), begin, data_d_->size() * sizeof(T), cudaMemcpyDefault)); } void LazyResizeDevice(size_t new_size) { if (data_d_ && new_size == data_d_->size()) { return; } SetDevice(); data_d_->resize(new_size); } void SetDevice() { CHECK_GE(device_, 0); if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(cudaSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } if (!data_d_) { data_d_.reset(new dh::device_vector<T>); } } }; template<typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(new HostDeviceVectorImpl<T>(size, v, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(new HostDeviceVectorImpl<T>(init, device)) {} template <typename T> HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other) : impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {} template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) { if (this == &other) { return *this; } 
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl( new HostDeviceVectorImpl<T>(std::move(*other.impl_))); delete impl_; impl_ = new_impl.release(); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer() { return impl_->DevicePointer(); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer() const { return impl_->ConstDevicePointer(); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan() { return impl_->DeviceSpan(); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const { return impl_->ConstDeviceSpan(); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Extend(HostDeviceVector const& other) { impl_->Extend(other.impl_); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanRead() const { return impl_->HostCanRead(); } template <typename T> bool HostDeviceVector<T>::HostCanWrite() const { return impl_->HostCanWrite(); } template <typename T> bool HostDeviceVector<T>::DeviceCanRead() const { return impl_->DeviceCanRead(); } template <typename T> bool 
HostDeviceVector<T>::DeviceCanWrite() const { return impl_->DeviceCanWrite(); } template <typename T> GPUAccess HostDeviceVector<T>::DeviceAccess() const { return impl_->Access(); } template <typename T> void HostDeviceVector<T>::SetDevice(int device) const { impl_->SetDevice(device); } template <typename T> void HostDeviceVector<T>::SetDevice(DeviceOrd device) const { impl_->SetDevice(device.ordinal); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<double>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<GradientPairPrecise>; template class HostDeviceVector<int32_t>; // bst_node_t template class HostDeviceVector<uint8_t>; template class HostDeviceVector<FeatureType>; template class HostDeviceVector<Entry>; template class HostDeviceVector<uint64_t>; // bst_row_t template class HostDeviceVector<uint32_t>; // bst_feature_t template class HostDeviceVector<RegTree::Node>; template class HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>; template class HostDeviceVector<RTreeNodeStat>; #if defined(__APPLE__) /* * On OSX: * * typedef unsigned int uint32_t; * typedef unsigned long long uint64_t; * typedef unsigned long __darwin_size_t; */ template class HostDeviceVector<std::size_t>; #endif // defined(__APPLE__) } // namespace xgboost
2a0e61a4fb945773090d51b6ce4e5905053d190a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny) { unsigned int nxthreads = gridDim.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int ix2 = ix + nxthreads; unsigned int idx = iy * nx + ix; unsigned int idx2 = iy * nx + ix2; if (iy < ny) { if (ix < nx) MatC[idx] = MatA[idx] + MatB[idx]; if (ix2 < nx) MatC[idx2] = MatA[idx2] + MatB[idx2]; } }
2a0e61a4fb945773090d51b6ce4e5905053d190a.cu
#include "includes.h" __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny) { unsigned int nxthreads = gridDim.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int ix2 = ix + nxthreads; unsigned int idx = iy * nx + ix; unsigned int idx2 = iy * nx + ix2; if (iy < ny) { if (ix < nx) MatC[idx] = MatA[idx] + MatB[idx]; if (ix2 < nx) MatC[idx2] = MatA[idx2] + MatB[idx2]; } }
6fdf8e7c609cb002e4638264c6944c4bf112bbb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlarfg.cu normal z -> d, Tue Sep 2 12:38:16 2014 @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define REAL // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_dlarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void dlarfg_kernel( int n, double* dalpha, double* dx, int incx, double* dtau ) { const int tx = threadIdx.x; __shared__ double swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? 
// if so, communicate it via swork[0] __shared__ double sscale; __shared__ double sscale2; double tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { double alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_D_ZERO; } else { // beta = norm( [dalpha, dx] ) double beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_D_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_D_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- DLARFG generates a real elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = norm([alpha, x]), and x is an (n-1)-element real vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a real scalar and v is a real (n-1)-element vector. 
Note that H is not symmetric. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. @param[in,out] dalpha DOUBLE_PRECISION* on the GPU. On entry, pointer to the value alpha, i.e., the first entry of the vector. On exit, it is overwritten with the value beta. @param[in,out] dx DOUBLE_PRECISION array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau DOUBLE_PRECISION* on the GPU. Pointer to the value tau. ********************************************************************/ extern "C" void magmablas_dlarfg( magma_int_t n, double* dalpha, double* dx, magma_int_t incx, double* dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); hipLaunchKernelGGL(( dlarfg_kernel), dim3(blocks), dim3(threads) , 0, 0, n, dalpha, dx, incx, dtau ); }
6fdf8e7c609cb002e4638264c6944c4bf112bbb9.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlarfg.cu normal z -> d, Tue Sep 2 12:38:16 2014 @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define REAL // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_dlarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void dlarfg_kernel( int n, double* dalpha, double* dx, int incx, double* dtau ) { const int tx = threadIdx.x; __shared__ double swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? 
// if so, communicate it via swork[0] __shared__ double sscale; __shared__ double sscale2; double tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { double alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_D_ZERO; } else { // beta = norm( [dalpha, dx] ) double beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_D_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_D_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- DLARFG generates a real elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]), and x is an (n-1)-element real vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a real scalar and v is a real (n-1)-element vector. 
Note that H is not symmetric. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. @param[in,out] dalpha DOUBLE_PRECISION* on the GPU. On entry, pointer to the value alpha, i.e., the first entry of the vector. On exit, it is overwritten with the value beta. @param[in,out] dx DOUBLE_PRECISION array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau DOUBLE_PRECISION* on the GPU. Pointer to the value tau. ********************************************************************/ extern "C" void magmablas_dlarfg( magma_int_t n, double* dalpha, double* dx, magma_int_t incx, double* dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); dlarfg_kernel<<< blocks, threads >>>( n, dalpha, dx, incx, dtau ); }
328060eb6626f5ac060f5601fddbd0868a810b50.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define CHECK_FOR_CORRECTNESS 1 #define MIN(a,b) (( (a) < (b) )?(a):(b)) #define GE 1 #define GI 2 /* Following section contains Kernel functions used by prefix sum */ /* Kernel Function1 - Initialize the array */ __global__ void initializeArray(int* A, int* B, int N) { int i = threadIdx.x; if(i<N) B[i] = A[i]; } /* Kernel Function2 - PrefixOperations on B */ __global__ void prefixOnB(int* B, int t, int s) { int i = threadIdx.x; B[t + i] = MIN(B[s + 2*i - 1] , B[s + 2*i]); } /* kernel Function3 - PrefixOperations on C */ __global__ void prefixOnC(int* B, int* C,int t, int s) { int i = threadIdx.x; if (1 == i) C[t + i] = B[t + i]; else if((i%2) == 0) { C[t + i] = C[s + (i>>1)]; } else { C[t + i] = MIN(C[s +((i-1)>>1)] , B[t + i]); } } /* Kernel Function4 - Copy the results */ __global__ void copyArray(int* S, int* C, int N) { int i = threadIdx.x; S[i] = C[i]; //printf("Setting S[%d] = %d , from C[%d] = %d\n", i, S[i], i, C[i]); } /* Just a somple function to get log to base 2*/ int log2(int x) { int k = 0; while(x>>=1) k++; return k; } /* Compute prefix sum of A into B * @param N - size of array A * @param d_A - Initial device(CUDA)-array over which prefixSum should be calculated * @param d_S - device(CUDA)-array into which prefix Sum has to be calculated */ void computePrefixSum(int * d_A, int* d_S, int N) { int * d_B, *d_C; size_t arrSize = N*sizeof(int); hipMalloc(&d_B, 2*arrSize); hipMalloc(&d_C, 2*arrSize); /* First call to Kernel Function to Initialize B */ int threadsPerBlock = N; int blocksPerGrid = 1;hipLaunchKernelGGL(( initializeArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, N); /* A few variables required in prefix-computations */ int m = N, t = 0, h=1; int k = log2(N); int s = 0; for(h =1; h<=k; h++) { s = t; t += m; m >>=1; /* Second call to CUDA Kernel Function - This time logN calls. 
Every call has m parallel instances */ blocksPerGrid = 1; threadsPerBlock = m; hipLaunchKernelGGL(( prefixOnB), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_B, t , s); } for(h=k;h>=0;h--) { blocksPerGrid = 1; threadsPerBlock = m; /* Third call to kernel function - Again logN times m of them */ hipLaunchKernelGGL(( prefixOnC), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_B, d_C, t , s); m<<=1; s= t; t-=m; } /* Copy the results from C */ threadsPerBlock = N; blocksPerGrid = 1;hipLaunchKernelGGL(( copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_S, d_C, N); /* Freeing two temporary device arrays B, C */ hipFree(d_B); hipFree(d_C); return; } /* Set of Kernel Functions used in sequence alignment calculation */ /* Kernel function to initialize d_G0, d_D0, d_I0 */ __global__ void initFirstRow(int *d_D0,int * d_I0, int *d_G0) { int i = threadIdx.x; d_G0[i] = GI + GE*i; d_D0[i] = GE*(i+1) + GI; if(0 == i) d_I0[i] = d_G0[i] + GE; } /* Kernel Function to update D from previous row */ __global__ void updateD(int* d_D1, int* d_D0, int* d_G0) { int j = threadIdx.x; d_D1[j] = MIN(d_D0[j] , d_G0[j] + GI )+GE; } /* Kernel Function to update array-U from currentD and previous G */ __global__ void updateU(int* d_U , int* d_D1, int* d_G0, int i, char* d_X, char* d_Y) { int j = threadIdx.x; if(j!=0) { int Sij; if(d_X[i] == d_Y[j]) Sij = 0; else Sij = 1; d_U[j] = MIN(d_D1[j], d_G0[j-1] + Sij); } } /* Kernel Function to update array-V from array-U */ __global__ void updateV(int* d_V, int* d_U) { int j = threadIdx.x; if(j!=0) { d_V[j] = d_U[j] + GI - j*GE; } } /* Main function - All of the implementation is in main */ int main() { int N; int blocksPerGrid, threadsPerBlock; char * X, *Y; /* char arrays in */ char * d_X, *d_Y; /* Global so that everyone can access */ /* Set of rows for matrices D, I, G and arrays U, V */ /* Have two versions R0, R1 for every array and they are used interchangably in every iteration */ int* d_D0, *d_D1, *d_I0, *d_I1, *d_G0, 
*d_G1, *d_U, *d_V, *d_S; scanf("%d",&N); size_t strSize = (N+1)*sizeof(char); size_t arrSize = N*sizeof(int); X = (char*) malloc(strSize); Y = (char*) malloc(strSize); printf("Going to take input for string with size %d\n", N); scanf("%s", X); scanf("%s", Y); printf("%s\n", X); printf("%s\n", Y); /* Declare and Initialize device arrays d_X, d_Y */ hipMalloc(&d_X, strSize ); hipMalloc(&d_Y, strSize ); hipMalloc(&d_D0, arrSize ); hipMalloc(&d_D1, arrSize ); hipMalloc(&d_G0, arrSize ); hipMalloc(&d_G1, arrSize ); hipMalloc(&d_I0, arrSize ); hipMalloc(&d_I1, arrSize ); hipMalloc(&d_U, arrSize ); hipMalloc(&d_V, arrSize ); hipMalloc(&d_S, arrSize ); /* Copy vectors from host memory to device memory */ hipMemcpy(d_X, X, strSize , hipMemcpyHostToDevice); hipMemcpy(d_Y, Y, strSize , hipMemcpyHostToDevice); /*Initialize set of rows d_G0, d_I0, d_D0 */ blocksPerGrid = 1; threadsPerBlock = N;hipLaunchKernelGGL(( initFirstRow), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_D0, d_I0, d_G0); /* For rows 1 to N calculate D, G, I from previous rows */ for(int i=1;i<N;i++) { if(i%2 == 1) /* Odd rows */ { hipLaunchKernelGGL(( updateD), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_D1, d_D0, d_G0); hipLaunchKernelGGL(( updateU), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_U , d_D1, d_G0, i, d_X, d_Y); hipLaunchKernelGGL(( updateV), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_V , d_U); computePrefixSum(d_V, d_S, N); } else /*Even rows*/ { hipLaunchKernelGGL(( updateD), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_D0, d_D1, d_G1); hipLaunchKernelGGL(( updateU), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_U , d_D0, d_G1, i, d_X, d_Y ); hipLaunchKernelGGL(( updateV), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_V , d_U); computePrefixSum(d_V, d_S, N); } } /*Done with calculations - Free Device memory */ hipFree(d_X); hipFree(d_Y); hipFree(d_G0); hipFree(d_G1); hipFree(d_I0); hipFree(d_I1); hipFree(d_D0); hipFree(d_D1); hipFree(d_V); 
hipFree(d_U); hipFree(d_S); printf("%s\n", X); printf("%s\n", Y); /* Free host memory */ free(X); free(Y); return 0; }
328060eb6626f5ac060f5601fddbd0868a810b50.cu
#include<stdio.h> #define CHECK_FOR_CORRECTNESS 1 #define MIN(a,b) (( (a) < (b) )?(a):(b)) #define GE 1 #define GI 2 /* Following section contains Kernel functions used by prefix sum */ /* Kernel Function1 - Initialize the array */ __global__ void initializeArray(int* A, int* B, int N) { int i = threadIdx.x; if(i<N) B[i] = A[i]; } /* Kernel Function2 - PrefixOperations on B */ __global__ void prefixOnB(int* B, int t, int s) { int i = threadIdx.x; B[t + i] = MIN(B[s + 2*i - 1] , B[s + 2*i]); } /* kernel Function3 - PrefixOperations on C */ __global__ void prefixOnC(int* B, int* C,int t, int s) { int i = threadIdx.x; if (1 == i) C[t + i] = B[t + i]; else if((i%2) == 0) { C[t + i] = C[s + (i>>1)]; } else { C[t + i] = MIN(C[s +((i-1)>>1)] , B[t + i]); } } /* Kernel Function4 - Copy the results */ __global__ void copyArray(int* S, int* C, int N) { int i = threadIdx.x; S[i] = C[i]; //printf("Setting S[%d] = %d , from C[%d] = %d\n", i, S[i], i, C[i]); } /* Just a somple function to get log to base 2*/ int log2(int x) { int k = 0; while(x>>=1) k++; return k; } /* Compute prefix sum of A into B * @param N - size of array A * @param d_A - Initial device(CUDA)-array over which prefixSum should be calculated * @param d_S - device(CUDA)-array into which prefix Sum has to be calculated */ void computePrefixSum(int * d_A, int* d_S, int N) { int * d_B, *d_C; size_t arrSize = N*sizeof(int); cudaMalloc(&d_B, 2*arrSize); cudaMalloc(&d_C, 2*arrSize); /* First call to Kernel Function to Initialize B */ int threadsPerBlock = N; int blocksPerGrid = 1; initializeArray<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, N); /* A few variables required in prefix-computations */ int m = N, t = 0, h=1; int k = log2(N); int s = 0; for(h =1; h<=k; h++) { s = t; t += m; m >>=1; /* Second call to CUDA Kernel Function - This time logN calls. 
Every call has m parallel instances */ blocksPerGrid = 1; threadsPerBlock = m; prefixOnB<<<blocksPerGrid, threadsPerBlock>>>(d_B, t , s); } for(h=k;h>=0;h--) { blocksPerGrid = 1; threadsPerBlock = m; /* Third call to kernel function - Again logN times m of them */ prefixOnC<<<blocksPerGrid, threadsPerBlock>>>(d_B, d_C, t , s); m<<=1; s= t; t-=m; } /* Copy the results from C */ threadsPerBlock = N; blocksPerGrid = 1; copyArray<<<blocksPerGrid, threadsPerBlock>>>(d_S, d_C, N); /* Freeing two temporary device arrays B, C */ cudaFree(d_B); cudaFree(d_C); return; } /* Set of Kernel Functions used in sequence alignment calculation */ /* Kernel function to initialize d_G0, d_D0, d_I0 */ __global__ void initFirstRow(int *d_D0,int * d_I0, int *d_G0) { int i = threadIdx.x; d_G0[i] = GI + GE*i; d_D0[i] = GE*(i+1) + GI; if(0 == i) d_I0[i] = d_G0[i] + GE; } /* Kernel Function to update D from previous row */ __global__ void updateD(int* d_D1, int* d_D0, int* d_G0) { int j = threadIdx.x; d_D1[j] = MIN(d_D0[j] , d_G0[j] + GI )+GE; } /* Kernel Function to update array-U from currentD and previous G */ __global__ void updateU(int* d_U , int* d_D1, int* d_G0, int i, char* d_X, char* d_Y) { int j = threadIdx.x; if(j!=0) { int Sij; if(d_X[i] == d_Y[j]) Sij = 0; else Sij = 1; d_U[j] = MIN(d_D1[j], d_G0[j-1] + Sij); } } /* Kernel Function to update array-V from array-U */ __global__ void updateV(int* d_V, int* d_U) { int j = threadIdx.x; if(j!=0) { d_V[j] = d_U[j] + GI - j*GE; } } /* Main function - All of the implementation is in main */ int main() { int N; int blocksPerGrid, threadsPerBlock; char * X, *Y; /* char arrays in */ char * d_X, *d_Y; /* Global so that everyone can access */ /* Set of rows for matrices D, I, G and arrays U, V */ /* Have two versions R0, R1 for every array and they are used interchangably in every iteration */ int* d_D0, *d_D1, *d_I0, *d_I1, *d_G0, *d_G1, *d_U, *d_V, *d_S; scanf("%d",&N); size_t strSize = (N+1)*sizeof(char); size_t arrSize = N*sizeof(int); X = 
(char*) malloc(strSize); Y = (char*) malloc(strSize); printf("Going to take input for string with size %d\n", N); scanf("%s", X); scanf("%s", Y); printf("%s\n", X); printf("%s\n", Y); /* Declare and Initialize device arrays d_X, d_Y */ cudaMalloc(&d_X, strSize ); cudaMalloc(&d_Y, strSize ); cudaMalloc(&d_D0, arrSize ); cudaMalloc(&d_D1, arrSize ); cudaMalloc(&d_G0, arrSize ); cudaMalloc(&d_G1, arrSize ); cudaMalloc(&d_I0, arrSize ); cudaMalloc(&d_I1, arrSize ); cudaMalloc(&d_U, arrSize ); cudaMalloc(&d_V, arrSize ); cudaMalloc(&d_S, arrSize ); /* Copy vectors from host memory to device memory */ cudaMemcpy(d_X, X, strSize , cudaMemcpyHostToDevice); cudaMemcpy(d_Y, Y, strSize , cudaMemcpyHostToDevice); /*Initialize set of rows d_G0, d_I0, d_D0 */ blocksPerGrid = 1; threadsPerBlock = N; initFirstRow<<<blocksPerGrid, threadsPerBlock>>>(d_D0, d_I0, d_G0); /* For rows 1 to N calculate D, G, I from previous rows */ for(int i=1;i<N;i++) { if(i%2 == 1) /* Odd rows */ { updateD<<<blocksPerGrid, threadsPerBlock>>>(d_D1, d_D0, d_G0); updateU<<<blocksPerGrid, threadsPerBlock>>>(d_U , d_D1, d_G0, i, d_X, d_Y); updateV<<<blocksPerGrid, threadsPerBlock>>>(d_V , d_U); computePrefixSum(d_V, d_S, N); } else /*Even rows*/ { updateD<<<blocksPerGrid, threadsPerBlock>>>(d_D0, d_D1, d_G1); updateU<<<blocksPerGrid, threadsPerBlock>>>(d_U , d_D0, d_G1, i, d_X, d_Y ); updateV<<<blocksPerGrid, threadsPerBlock>>>(d_V , d_U); computePrefixSum(d_V, d_S, N); } } /*Done with calculations - Free Device memory */ cudaFree(d_X); cudaFree(d_Y); cudaFree(d_G0); cudaFree(d_G1); cudaFree(d_I0); cudaFree(d_I1); cudaFree(d_D0); cudaFree(d_D1); cudaFree(d_V); cudaFree(d_U); cudaFree(d_S); printf("%s\n", X); printf("%s\n", Y); /* Free host memory */ free(X); free(Y); return 0; }
7247d9313e55c60f0b5ebeabf440bf2de2b41651.hip
// !!! This is a file automatically generated by hipify!!! //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); //**************************************************************************** #include <iostream> #include <iomanip> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != hipSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << hipGetErrorString(err) << " " << func << std::endl; exit(1); } } #define GAUSSIAN_SZ 9 // 1 = Laplacian5x5 ; 2 = Nitidez5x5; 3 = PasoAlto5x5; 4 = Media3x3 ; 5 = Blur3x3 ; 6 = Blur5x5 ; 7 = GaussianBlur ; 8 = SobelHori3x3 ; 9 = SobelVert3x3 #define FILTER 1 //Definimos tamao de bloque en preprocesador para facilidad al hacer pruebas #define BLOCK_SZ 32 //Definimos tamao de convolucin en preprocesador para poder inicializar array de memoria constante #if FILTER == 4 || FILTER == 5 || FILTER == 8 || FILTER == 9 #define KERNEL_SZ 3 #elif FILTER == 7 #ifndef GAUSSIAN_SZ #define KERNEL_SZ 3 #else #define KERNEL_SZ GAUSSIAN_SZ #endif #else #define KERNEL_SZ 5 #endif __constant__ float d_filterConst[KERNEL_SZ*KERNEL_SZ]; //Definimos para facilitar el cambio entre los kernels de memoria compartida y global #define SHARED 1 __global__ void box_filter_shared(const unsigned char* const 
inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTA: Que un thread tenga una posicin correcta en 2D no quiere decir que al aplicar el filtro // los valores de sus vecinos sean correctos, ya que pueden salirse de la imagen. extern __shared__ unsigned char image_shared[]; const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //Shared size siempre debera ser par, ya que blockdim.x, blockdim.y //y filterwidth-1 siempre debern ser pares const int sharedSize = (blockDim.x + filterWidth - 1) * (blockDim.y + filterWidth - 1); const int halfFilterWidth = filterWidth / 2; const int threadNum = threadIdx.y * blockDim.x + threadIdx.x; const int width = blockDim.x + filterWidth - 1; const int numThreads = blockDim.x * blockDim.y; int workingThreads, offset = 0; //Calculamos coordenadas de imagen de la seccin a mapear en shared memory const int startX = blockIdx.x * blockDim.x - halfFilterWidth; const int startY = blockIdx.y * blockDim.y - halfFilterWidth; while(offset < sharedSize) { workingThreads = sharedSize - offset; workingThreads = numThreads > workingThreads ? workingThreads : numThreads; if(threadNum < workingThreads) { //Calculamos las coordenadas en shared memory int sharedY = (threadNum+offset) / width; int sharedX = (threadNum+offset) - sharedY * width; //Pasamos a coordenadas de imagen int imgX = sharedX + startX; int imgY = sharedY + startY; //Hacemos clamp para asegurar que no nos salimos de la imagen imgY = imgY >= numRows ? numRows - 1 : imgY < 0 ? 0 : imgY; imgX = imgX >= numCols ? numCols - 1 : imgX < 0 ? 
0 : imgX; image_shared[threadNum + offset] = inputChannel[imgY * numCols + imgX]; } offset += workingThreads; } __syncthreads(); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int filterRadius = filterWidth / 2; float result = 0; for (int j = -filterRadius; j <= filterRadius; j++) for (int i = -filterRadius; i <= filterRadius; i++) { int x = threadIdx.x + halfFilterWidth + i; int y = threadIdx.y + halfFilterWidth + j; result += (float)d_filterConst[(j + filterRadius)*filterWidth + i + filterRadius] * (float)image_shared[y*width + x]; } outputChannel[thread_1D_pos] = result > 255 ? 255 : result < 0 ? 0 : (char)result; } __global__ void box_filter(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTA: Que un thread tenga una posicin correcta en 2D no quiere decir que al aplicar el filtro // los valores de sus vecinos sean correctos, ya que pueden salirse de la imagen. const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int filterRadius = filterWidth / 2; float result = 0; for (int j = -filterRadius; j <= filterRadius; j++) for (int i = -filterRadius; i <= filterRadius; i++) { int x = thread_2D_pos.x + i; x = x >= numCols ? numCols - 1 : x; x = x < 0 ? 0 : x; int y = thread_2D_pos.y + j; y = y >= numRows ? numRows - 1 : y; y = y < 0 ? 
0 : y; //Sin memoria de constantes //result += (float) filter[(j + filterRadius)*filterWidth + i + filterRadius] * (float) inputChannel[y*numCols + x]; //Con memoria de constantes result += (float)d_filterConst[(j + filterRadius)*filterWidth + i + filterRadius] * (float)inputChannel[y*numCols + x]; } outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = result>255?255:result<0?0:(char)result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada // Copiar el filtro (h_filter) a memoria global de la GPU (d_filter) checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(d_filterConst, h_filter, sizeof(float) * filterWidth * filterWidth, 0, hipMemcpyHostToDevice)); } void create_filter(float **h_filter, int *filterWidth){ const int KernelWidth = KERNEL_SZ; //OJO CON EL TAMAO DEL 
FILTRO// *filterWidth = KernelWidth; //create and fill the filter we will convolve with *h_filter = new float[KernelWidth * KernelWidth]; /* //Filtro gaussiano: blur const float KernelSigma = 2.; float filterSum = 0.f; //for normalization for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) { for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) { float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma)); (*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue; filterSum += filterValue; } } float normalizationFactor = 1.f / filterSum; for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) { for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) { (*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor; } } */ #if FILTER == 2 //Nitidez 5x5 (*h_filter)[0] = -1.; (*h_filter)[1] = -3.; (*h_filter)[2] = -4.; (*h_filter)[3] = -3.; (*h_filter)[4] = -1.; (*h_filter)[5] = -3.; (*h_filter)[6] = 0; (*h_filter)[7] = 6.; (*h_filter)[8] = 0; (*h_filter)[9] = 3.; (*h_filter)[10] = -4.; (*h_filter)[11] = 6.; (*h_filter)[12] = 21.; (*h_filter)[13] = 6.; (*h_filter)[14] = -4.; (*h_filter)[15] = -3.; (*h_filter)[16] = 0; (*h_filter)[17] = 6.; (*h_filter)[18] = 0; (*h_filter)[19] = -3.; (*h_filter)[20] = -1.; (*h_filter)[21] = -3.; (*h_filter)[22] = -4.; (*h_filter)[23] = -3.; (*h_filter)[24] = -1.; #elif FILTER == 3 //PasoAlto 5x5 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.; (*h_filter)[10] = 1.; (*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.; (*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.; (*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.; for (int i = 0; i < 25; i++) 
(*h_filter)[i] /= 62.0; #elif FILTER == 4 //Media3x3 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 1.; (*h_filter)[7] = 1.; (*h_filter)[8] = 1.; for (int i = 0; i < 9; i++) (*h_filter)[i] /= 9.0; #elif FILTER == 5 //Blur3x3 (*h_filter)[0] = 1.; (*h_filter)[1] = 2.; (*h_filter)[2] = 1.; (*h_filter)[3] = 2.; (*h_filter)[4] = 4.; (*h_filter)[5] = 2.; (*h_filter)[6] = 1.; (*h_filter)[7] = 2.; (*h_filter)[8] = 1.; for (int i = 0; i < 9; i++) (*h_filter)[i] /= 16.0; #elif FILTER == 6 //Blur5x5 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.; (*h_filter)[10] = 1.; (*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.; (*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.; (*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.; for (int i = 0; i < 25; i++) (*h_filter)[i] /= 25.0; #elif FILTER == 7 //GaussNxN; N = GAUSSIAN_SZ const float KernelSigma = 2.; float filterSum = 0.f; //for normalization for (int r = -KernelWidth / 2; r <= KernelWidth / 2; ++r) { for (int c = -KernelWidth / 2; c <= KernelWidth / 2; ++c) { float filterValue = expf(-(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma)); (*h_filter)[(r + KernelWidth / 2) * KernelWidth + c + KernelWidth / 2] = filterValue; filterSum += filterValue; } } float normalizationFactor = 1.f / filterSum; for (int r = -KernelWidth / 2; r <= KernelWidth / 2; ++r) { for (int c = -KernelWidth / 2; c <= KernelWidth / 2; ++c) { (*h_filter)[(r + KernelWidth / 2) * KernelWidth + c + KernelWidth / 2] *= normalizationFactor; } } #elif FILTER == 8 //SobelHorizontal3x3 (*h_filter)[0] = -1.; (*h_filter)[1] = -2.; (*h_filter)[2] = -1.; 
(*h_filter)[3] = 0; (*h_filter)[4] = 0; (*h_filter)[5] = 0; (*h_filter)[6] = 1.; (*h_filter)[7] = 2.; (*h_filter)[8] = 1.; #elif FILTER == 9 //SobelVertical3x3 (*h_filter)[0] = -1.; (*h_filter)[1] = 0; (*h_filter)[2] = 1.; (*h_filter)[3] = -2.; (*h_filter)[4] = 0; (*h_filter)[5] = 2.; (*h_filter)[6] = -1.; (*h_filter)[7] = 0; (*h_filter)[8] = 1.; #else //Laplaciano 5x5 (*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0; (*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0; (*h_filter)[10] = -1.; (*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.; (*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0; (*h_filter)[20] = 1.; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0; #endif } void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redFiltered, unsigned char *d_greenFiltered, unsigned char *d_blueFiltered, const int filterWidth) { //TODO: Calcular tamaos de bloque const dim3 blockSize = {BLOCK_SZ, BLOCK_SZ, 1}; const dim3 gridSize = { ((unsigned int)numCols-1)/blockSize.x+1, ((unsigned int)numRows-1)/blockSize.y+1, 1 }; //TODO: Lanzar kernel para separar imagenes RGBA en diferentes colores hipLaunchKernelGGL(( separateChannels) , dim3(gridSize), dim3(blockSize) , 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue ); //TODO: Ejecutar convolucin. 
Una por canal #if SHARED == 1 hipLaunchKernelGGL(( box_filter_shared) , dim3(gridSize), dim3(blockSize), sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) , 0, d_red, d_redFiltered, numRows, numCols, d_filter, filterWidth ); hipLaunchKernelGGL(( box_filter_shared) , dim3(gridSize), dim3(blockSize), sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) , 0, d_green, d_greenFiltered, numRows, numCols, d_filter, filterWidth ); hipLaunchKernelGGL(( box_filter_shared) , dim3(gridSize), dim3(blockSize), sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) , 0, d_blue, d_blueFiltered, numRows, numCols, d_filter, filterWidth ); #else hipLaunchKernelGGL(( box_filter), dim3(gridSize), dim3(blockSize) , 0, 0, d_red, d_redFiltered, numRows, numCols, d_filter, filterWidth ); box_filter << <gridSize, blockSize >> > (d_green, d_greenFiltered, numRows, numCols, d_filter, filterWidth ); box_filter << <gridSize, blockSize >> > (d_blue, d_blueFiltered, numRows, numCols, d_filter, filterWidth ); #endif // Recombining the results. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
7247d9313e55c60f0b5ebeabf440bf2de2b41651.cu
//**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); //**************************************************************************** #include <iostream> #include <iomanip> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__) template<typename T> void check(T err, const char* const func, const char* const file, const int line) { if (err != cudaSuccess) { std::cerr << "CUDA error at: " << file << ":" << line << std::endl; std::cerr << cudaGetErrorString(err) << " " << func << std::endl; exit(1); } } #define GAUSSIAN_SZ 9 // 1 = Laplacian5x5 ; 2 = Nitidez5x5; 3 = PasoAlto5x5; 4 = Media3x3 ; 5 = Blur3x3 ; 6 = Blur5x5 ; 7 = GaussianBlur ; 8 = SobelHori3x3 ; 9 = SobelVert3x3 #define FILTER 1 //Definimos tamaño de bloque en preprocesador para facilidad al hacer pruebas #define BLOCK_SZ 32 //Definimos tamaño de convolución en preprocesador para poder inicializar array de memoria constante #if FILTER == 4 || FILTER == 5 || FILTER == 8 || FILTER == 9 #define KERNEL_SZ 3 #elif FILTER == 7 #ifndef GAUSSIAN_SZ #define KERNEL_SZ 3 #else #define KERNEL_SZ GAUSSIAN_SZ #endif #else #define KERNEL_SZ 5 #endif __constant__ float d_filterConst[KERNEL_SZ*KERNEL_SZ]; //Definimos para facilitar el cambio entre los kernels de memoria compartida y global #define SHARED 1 __global__ void box_filter_shared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int 
numCols, const float* const filter, const int filterWidth) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTA: Que un thread tenga una posición correcta en 2D no quiere decir que al aplicar el filtro // los valores de sus vecinos sean correctos, ya que pueden salirse de la imagen. extern __shared__ unsigned char image_shared[]; const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //Shared size siempre debería ser par, ya que blockdim.x, blockdim.y //y filterwidth-1 siempre deberán ser pares const int sharedSize = (blockDim.x + filterWidth - 1) * (blockDim.y + filterWidth - 1); const int halfFilterWidth = filterWidth / 2; const int threadNum = threadIdx.y * blockDim.x + threadIdx.x; const int width = blockDim.x + filterWidth - 1; const int numThreads = blockDim.x * blockDim.y; int workingThreads, offset = 0; //Calculamos coordenadas de imagen de la sección a mapear en shared memory const int startX = blockIdx.x * blockDim.x - halfFilterWidth; const int startY = blockIdx.y * blockDim.y - halfFilterWidth; while(offset < sharedSize) { workingThreads = sharedSize - offset; workingThreads = numThreads > workingThreads ? workingThreads : numThreads; if(threadNum < workingThreads) { //Calculamos las coordenadas en shared memory int sharedY = (threadNum+offset) / width; int sharedX = (threadNum+offset) - sharedY * width; //Pasamos a coordenadas de imagen int imgX = sharedX + startX; int imgY = sharedY + startY; //Hacemos clamp para asegurar que no nos salimos de la imagen imgY = imgY >= numRows ? numRows - 1 : imgY < 0 ? 0 : imgY; imgX = imgX >= numCols ? numCols - 1 : imgX < 0 ? 
0 : imgX; image_shared[threadNum + offset] = inputChannel[imgY * numCols + imgX]; } offset += workingThreads; } __syncthreads(); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int filterRadius = filterWidth / 2; float result = 0; for (int j = -filterRadius; j <= filterRadius; j++) for (int i = -filterRadius; i <= filterRadius; i++) { int x = threadIdx.x + halfFilterWidth + i; int y = threadIdx.y + halfFilterWidth + j; result += (float)d_filterConst[(j + filterRadius)*filterWidth + i + filterRadius] * (float)image_shared[y*width + x]; } outputChannel[thread_1D_pos] = result > 255 ? 255 : result < 0 ? 0 : (char)result; } __global__ void box_filter(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTA: Que un thread tenga una posición correcta en 2D no quiere decir que al aplicar el filtro // los valores de sus vecinos sean correctos, ya que pueden salirse de la imagen. const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; int filterRadius = filterWidth / 2; float result = 0; for (int j = -filterRadius; j <= filterRadius; j++) for (int i = -filterRadius; i <= filterRadius; i++) { int x = thread_2D_pos.x + i; x = x >= numCols ? numCols - 1 : x; x = x < 0 ? 0 : x; int y = thread_2D_pos.y + j; y = y >= numRows ? numRows - 1 : y; y = y < 0 ? 
0 : y; //Sin memoria de constantes //result += (float) filter[(j + filterRadius)*filterWidth + i + filterRadius] * (float) inputChannel[y*numCols + x]; //Con memoria de constantes result += (float)d_filterConst[(j + filterRadius)*filterWidth + i + filterRadius] * (float)inputChannel[y*numCols + x]; } outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = result>255?255:result<0?0:(char)result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO: // NOTA: Cuidado al acceder a memoria que esta fuera de los limites de la imagen // const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada // Copiar el filtro (h_filter) a memoria global de la GPU (d_filter) checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(d_filterConst, h_filter, sizeof(float) * filterWidth * filterWidth, 0, cudaMemcpyHostToDevice)); } void create_filter(float **h_filter, int *filterWidth){ const int KernelWidth = KERNEL_SZ; //OJO CON EL TAMAÑO 
DEL FILTRO// *filterWidth = KernelWidth; //create and fill the filter we will convolve with *h_filter = new float[KernelWidth * KernelWidth]; /* //Filtro gaussiano: blur const float KernelSigma = 2.; float filterSum = 0.f; //for normalization for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) { for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) { float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma)); (*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue; filterSum += filterValue; } } float normalizationFactor = 1.f / filterSum; for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) { for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) { (*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor; } } */ #if FILTER == 2 //Nitidez 5x5 (*h_filter)[0] = -1.; (*h_filter)[1] = -3.; (*h_filter)[2] = -4.; (*h_filter)[3] = -3.; (*h_filter)[4] = -1.; (*h_filter)[5] = -3.; (*h_filter)[6] = 0; (*h_filter)[7] = 6.; (*h_filter)[8] = 0; (*h_filter)[9] = 3.; (*h_filter)[10] = -4.; (*h_filter)[11] = 6.; (*h_filter)[12] = 21.; (*h_filter)[13] = 6.; (*h_filter)[14] = -4.; (*h_filter)[15] = -3.; (*h_filter)[16] = 0; (*h_filter)[17] = 6.; (*h_filter)[18] = 0; (*h_filter)[19] = -3.; (*h_filter)[20] = -1.; (*h_filter)[21] = -3.; (*h_filter)[22] = -4.; (*h_filter)[23] = -3.; (*h_filter)[24] = -1.; #elif FILTER == 3 //PasoAlto 5x5 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.; (*h_filter)[10] = 1.; (*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.; (*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.; (*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.; for (int i = 0; i < 25; i++) 
(*h_filter)[i] /= 62.0; #elif FILTER == 4 //Media3x3 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 1.; (*h_filter)[7] = 1.; (*h_filter)[8] = 1.; for (int i = 0; i < 9; i++) (*h_filter)[i] /= 9.0; #elif FILTER == 5 //Blur3x3 (*h_filter)[0] = 1.; (*h_filter)[1] = 2.; (*h_filter)[2] = 1.; (*h_filter)[3] = 2.; (*h_filter)[4] = 4.; (*h_filter)[5] = 2.; (*h_filter)[6] = 1.; (*h_filter)[7] = 2.; (*h_filter)[8] = 1.; for (int i = 0; i < 9; i++) (*h_filter)[i] /= 16.0; #elif FILTER == 6 //Blur5x5 (*h_filter)[0] = 1.; (*h_filter)[1] = 1.; (*h_filter)[2] = 1.; (*h_filter)[3] = 1.; (*h_filter)[4] = 1.; (*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.; (*h_filter)[10] = 1.; (*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.; (*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.; (*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.; for (int i = 0; i < 25; i++) (*h_filter)[i] /= 25.0; #elif FILTER == 7 //GaussNxN; N = GAUSSIAN_SZ const float KernelSigma = 2.; float filterSum = 0.f; //for normalization for (int r = -KernelWidth / 2; r <= KernelWidth / 2; ++r) { for (int c = -KernelWidth / 2; c <= KernelWidth / 2; ++c) { float filterValue = expf(-(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma)); (*h_filter)[(r + KernelWidth / 2) * KernelWidth + c + KernelWidth / 2] = filterValue; filterSum += filterValue; } } float normalizationFactor = 1.f / filterSum; for (int r = -KernelWidth / 2; r <= KernelWidth / 2; ++r) { for (int c = -KernelWidth / 2; c <= KernelWidth / 2; ++c) { (*h_filter)[(r + KernelWidth / 2) * KernelWidth + c + KernelWidth / 2] *= normalizationFactor; } } #elif FILTER == 8 //SobelHorizontal3x3 (*h_filter)[0] = -1.; (*h_filter)[1] = -2.; (*h_filter)[2] = -1.; 
(*h_filter)[3] = 0; (*h_filter)[4] = 0; (*h_filter)[5] = 0; (*h_filter)[6] = 1.; (*h_filter)[7] = 2.; (*h_filter)[8] = 1.; #elif FILTER == 9 //SobelVertical3x3 (*h_filter)[0] = -1.; (*h_filter)[1] = 0; (*h_filter)[2] = 1.; (*h_filter)[3] = -2.; (*h_filter)[4] = 0; (*h_filter)[5] = 2.; (*h_filter)[6] = -1.; (*h_filter)[7] = 0; (*h_filter)[8] = 1.; #else //Laplaciano 5x5 (*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0; (*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0; (*h_filter)[10] = -1.; (*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.; (*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0; (*h_filter)[20] = 1.; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0; #endif } void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redFiltered, unsigned char *d_greenFiltered, unsigned char *d_blueFiltered, const int filterWidth) { //TODO: Calcular tamaños de bloque const dim3 blockSize = {BLOCK_SZ, BLOCK_SZ, 1}; const dim3 gridSize = { ((unsigned int)numCols-1)/blockSize.x+1, ((unsigned int)numRows-1)/blockSize.y+1, 1 }; //TODO: Lanzar kernel para separar imagenes RGBA en diferentes colores separateChannels <<<gridSize, blockSize >>> (d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue ); //TODO: Ejecutar convolución. 
Una por canal #if SHARED == 1 box_filter_shared <<<gridSize, blockSize, sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) >>> ( d_red, d_redFiltered, numRows, numCols, d_filter, filterWidth ); box_filter_shared <<<gridSize, blockSize, sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) >>> ( d_green, d_greenFiltered, numRows, numCols, d_filter, filterWidth ); box_filter_shared <<<gridSize, blockSize, sizeof(unsigned char) * (blockSize.x + filterWidth - 1) * (blockSize.y + filterWidth - 1) >>> ( d_blue, d_blueFiltered, numRows, numCols, d_filter, filterWidth ); #else box_filter<<<gridSize, blockSize >>> (d_red, d_redFiltered, numRows, numCols, d_filter, filterWidth ); box_filter << <gridSize, blockSize >> > (d_green, d_greenFiltered, numRows, numCols, d_filter, filterWidth ); box_filter << <gridSize, blockSize >> > (d_blue, d_blueFiltered, numRows, numCols, d_filter, filterWidth ); #endif // Recombining the results. recombineChannels<<<gridSize, blockSize>>>(d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
3f97de13b87427ab18842b2d9c76c455a140164c.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : CudaProject.cu Author : Adrianna Urbaska, Gabriel Ch Version : Copyright : Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 32 class CudaObject { public: int size_x; int size_y; int *data; int stride; int bytes; __host__ __device__ CudaObject(int x, int y, int stride ): size_x(x), size_y(y),stride(stride){} __host__ __device__ CudaObject(const CudaObject &a): size_x(a.size_x), size_y(a.size_y),data(a.data),stride(a.stride){} __device__ int getElement(int row, int col){ return data[row * stride + col]; } __device__ void setElement(int row, int col, int val){ data[row * stride + col] = val; } __device__ CudaObject cutMatrix(int row, int col){ CudaObject tmp(BLOCK_SIZE, BLOCK_SIZE, stride); tmp.data = &data[stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return tmp; } __host__ void writeOut(){ for(int i = 0; i < size_x; i++){ std::cout<<"| "; for(int j = 0; j < size_y; j++){ std::cout<<data[i * size_y + j]<<" "; } std::cout<<"|"<<std::endl; } std::cout<<"\n"; } void setSize(int x, int y){ this->size_x = x; this->size_y = y; this->bytes = x * y * sizeof(int); hipMallocManaged(&this->data, this->bytes); } void addCpu(CudaObject &fData, CudaObject &sData){ if(fData.size_x == sData.size_x && fData.size_y == sData.size_y){ hipMemPrefetchAsync(this->data, this->bytes, hipCpuDeviceId); hipMemPrefetchAsync(fData.data, fData.bytes, hipCpuDeviceId); hipMemPrefetchAsync(sData.data, sData.bytes, hipCpuDeviceId); this->setSize(sData.size_x, sData.size_y); for(int i = 0; i < sData.size_x; i++){ for(int j = 0; j < sData.size_y; j++){ this->data[i * this->size_x + j] = fData.data[i * this->size_x + j] + sData.data[i * this->size_x + j]; } } } } void subCpu(CudaObject &fData, CudaObject 
&sData){ if(fData.size_x == sData.size_x && fData.size_y == sData.size_y){ hipMemPrefetchAsync(this->data, this->bytes, hipCpuDeviceId); hipMemPrefetchAsync(fData.data, fData.bytes, hipCpuDeviceId); hipMemPrefetchAsync(sData.data, sData.bytes, hipCpuDeviceId); this->setSize(sData.size_x, sData.size_y); for(int i = 0; i < sData.size_x; i++){ for(int j = 0; j < sData.size_y; j++){ this->data[i * this->size_x + j] = fData.data[i * this->size_x + j] - sData.data[i * this->size_x + j]; } } } } void mulCpu(CudaObject &fData, CudaObject &sData){ int y_s = sData.size_y; int y_f = fData.size_y; for(int i = 0; i < size_x; i++ ){ for(int j = 0; j < size_y; j++ ){ int s = 0; for(int k = 0; k < y_f; k++ ) s += fData.data[i * y_f + k] * sData.data[k * y_s + j]; this->data[i * y_s + j] = s; } } } void tranCpu(CudaObject &iData){ int x = iData.size_x; int y = iData.size_y; this->setSize(y,x); for(int n = 0; n < x * y; n++){ int i = n/x; int j = n%x; this->data[n] = iData.data[y * j + i]; } } }; __global__ void add(int *fData, int *sData, int *oData, int x, int y){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < x*y; i += stride) { oData[i] = fData[i] + sData[i]; } } __global__ void sub(int *fData, int *sData, int *oData, int x, int y){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < x * y; i += stride) { oData[i] = fData[i] - sData[i]; } } __global__ void mul(CudaObject a,CudaObject b, CudaObject c) { int cutRow = blockIdx.y ; int cutCol = blockIdx.x; int fRow = blockIdx.y * blockDim.y + threadIdx.y; int fCol = blockIdx.x * blockDim.x + threadIdx.x; int row = threadIdx.y; int col = threadIdx.x; int temp = 0; CudaObject cutMatC = c.cutMatrix(cutRow, cutCol); for( int v = 0; v < ((a.size_y + BLOCK_SIZE - 1)/BLOCK_SIZE); ++v){ CudaObject cutMatA = a.cutMatrix(cutRow, v); //cut input matrix vector which can fit inside block CudaObject cutMatB = b.cutMatrix(v, 
cutCol); __shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; //Matrix wchich can share memory between threads __shared__ int B[BLOCK_SIZE][BLOCK_SIZE]; if((row < a.size_x) && ((col + v * BLOCK_SIZE) < a.size_y)){ A[row][col] = cutMatA.getElement(row, col); } else{ A[row][col] = 0; } if((col < b.size_y) && ((row + v * BLOCK_SIZE) < b.size_x)){ B[row][col] = cutMatB.getElement(row, col); } else{ B[row][col] = 0; } __syncthreads(); //make sure that every metrix is filled for (int i = 0; i < BLOCK_SIZE; ++i){ temp += A[row][i] * B[i][col]; } __syncthreads(); } if(fRow < c.size_x && fCol < c.size_y) c.setElement(fRow, fCol, temp); } __global__ void tran(CudaObject iData, CudaObject oData){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int x = iData.size_x; int y = iData.size_y; for(int n = index; n < x * y; n += stride){ int i = n/x; int j = n%x; oData.data[n] = iData.data[y * j + i]; } } void OperationsInfo() { std::cout<<"Choose an operation:"<<std::endl; std::cout<<"1. Matrix addition on CPU"<<std::endl; std::cout<<"2. Matrix addition on GPU"<<std::endl; std::cout<<"3. Matrix substraction on CPU"<<std::endl; std::cout<<"4. Matrix substraction on GPU"<<std::endl; std::cout<<"5. Matrix multiplication on CPU"<<std::endl; std::cout<<"6. Matrix multiplication on GPU"<<std::endl; std::cout<<"7. Matrix transposition on CPU"<<std::endl; std::cout<<"8. 
Matrix transposition on GPU"<<std::endl; } void Init(CudaObject &oData, int val) { int x = oData.size_x; int y = oData.size_y; for(int i = 0; i < y; i++){ for(int j = 0; j<x; j++){ oData.data[i*x+j] = val; } } } int main(){ int operation; int N_1, N_2, M_1, M_2; int val_1, val_2; std::cout<<"Enter the values of size_x, size_y of the first matrix and value to filled matrix:"<<std::endl; std::cin>>N_1; std::cin>>M_1; std::cin>>val_1; std::cout<<"Enter the values of size_x, size_y of the second matrix and value to filled matrix:"<<std::endl; std::cin>>N_2; std::cin>>M_2; std::cin>>val_2; CudaObject fData(N_1, M_1, M_1), sData(N_2, M_2, M_2), oData(N_1, M_2, M_2); hipMallocManaged(&fData.data,N_1 * M_1 * sizeof(int)); hipMallocManaged(&sData.data, N_2 * M_2 * sizeof(int)); hipMallocManaged(&oData.data, N_1 * M_2 * sizeof(int)); Init(fData,val_1); Init(sData,val_2); fData.writeOut(); sData.writeOut(); OperationsInfo(); std::cin>>operation; dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 blocksPerGrid((fData.size_y + threadsPerBlock.x - 1)/threadsPerBlock.x, (sData.size_x + threadsPerBlock.y - 1)/threadsPerBlock.y); switch(operation) { case 1: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ oData.addCpu(fData,sData); oData.writeOut(); } break; case 2: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ hipLaunchKernelGGL(( add), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, fData.data, sData.data, oData.data, oData.size_x, oData.size_y); hipDeviceSynchronize(); oData.writeOut(); } break; case 3: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ oData.subCpu(fData,sData); oData.writeOut(); } break; case 4: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be 
equal!"<<std::endl; } else{ hipLaunchKernelGGL(( sub), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, fData.data, sData.data, oData.data, oData.size_x, oData.size_y); hipDeviceSynchronize(); oData.writeOut(); } break; case 5: if(fData.size_y != sData.size_x){ std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl; } else{ oData.mulCpu(fData,sData); oData.writeOut(); } break; case 6: if(fData.size_y != sData.size_x){ std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl; } else{ hipLaunchKernelGGL(( mul), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, fData, sData, oData); hipDeviceSynchronize(); oData.writeOut(); } break; case 7: std::cout<<"Transposition of the first matrix: "<<std::endl; oData.tranCpu(fData); oData.writeOut(); std::cout<<"Transposition of the second matrix: "<<std::endl; oData.tranCpu(sData); oData.writeOut(); break; case 8: std::cout<<"Transposition of the first matrix: "<<std::endl; oData.setSize(M_1,N_1); hipLaunchKernelGGL(( tran), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, fData, oData); hipDeviceSynchronize(); oData.writeOut(); std::cout<<"Transposition of the second matrix: "<<std::endl; oData.setSize(M_2,N_2); hipLaunchKernelGGL(( tran), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, sData, oData); hipDeviceSynchronize(); oData.writeOut(); break; default: std::cout<<"Wrong number entered!"<<std::endl; break; } hipError_t err = hipSuccess; if (err != hipSuccess){ fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipFree(fData.data); hipFree(sData.data); hipFree(oData.data); }
3f97de13b87427ab18842b2d9c76c455a140164c.cu
/* ============================================================================ Name : CudaProject.cu Author : Adrianna Urbańska, Gabriel Chęć Version : Copyright : Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <stdio.h> #include <cuda_runtime.h> #define BLOCK_SIZE 32 class CudaObject { public: int size_x; int size_y; int *data; int stride; int bytes; __host__ __device__ CudaObject(int x, int y, int stride ): size_x(x), size_y(y),stride(stride){} __host__ __device__ CudaObject(const CudaObject &a): size_x(a.size_x), size_y(a.size_y),data(a.data),stride(a.stride){} __device__ int getElement(int row, int col){ return data[row * stride + col]; } __device__ void setElement(int row, int col, int val){ data[row * stride + col] = val; } __device__ CudaObject cutMatrix(int row, int col){ CudaObject tmp(BLOCK_SIZE, BLOCK_SIZE, stride); tmp.data = &data[stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return tmp; } __host__ void writeOut(){ for(int i = 0; i < size_x; i++){ std::cout<<"| "; for(int j = 0; j < size_y; j++){ std::cout<<data[i * size_y + j]<<" "; } std::cout<<"|"<<std::endl; } std::cout<<"\n"; } void setSize(int x, int y){ this->size_x = x; this->size_y = y; this->bytes = x * y * sizeof(int); cudaMallocManaged(&this->data, this->bytes); } void addCpu(CudaObject &fData, CudaObject &sData){ if(fData.size_x == sData.size_x && fData.size_y == sData.size_y){ cudaMemPrefetchAsync(this->data, this->bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(fData.data, fData.bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(sData.data, sData.bytes, cudaCpuDeviceId); this->setSize(sData.size_x, sData.size_y); for(int i = 0; i < sData.size_x; i++){ for(int j = 0; j < sData.size_y; j++){ this->data[i * this->size_x + j] = fData.data[i * this->size_x + j] + sData.data[i * this->size_x + j]; } } } } void subCpu(CudaObject &fData, CudaObject &sData){ if(fData.size_x == sData.size_x && 
fData.size_y == sData.size_y){ cudaMemPrefetchAsync(this->data, this->bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(fData.data, fData.bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(sData.data, sData.bytes, cudaCpuDeviceId); this->setSize(sData.size_x, sData.size_y); for(int i = 0; i < sData.size_x; i++){ for(int j = 0; j < sData.size_y; j++){ this->data[i * this->size_x + j] = fData.data[i * this->size_x + j] - sData.data[i * this->size_x + j]; } } } } void mulCpu(CudaObject &fData, CudaObject &sData){ int y_s = sData.size_y; int y_f = fData.size_y; for(int i = 0; i < size_x; i++ ){ for(int j = 0; j < size_y; j++ ){ int s = 0; for(int k = 0; k < y_f; k++ ) s += fData.data[i * y_f + k] * sData.data[k * y_s + j]; this->data[i * y_s + j] = s; } } } void tranCpu(CudaObject &iData){ int x = iData.size_x; int y = iData.size_y; this->setSize(y,x); for(int n = 0; n < x * y; n++){ int i = n/x; int j = n%x; this->data[n] = iData.data[y * j + i]; } } }; __global__ void add(int *fData, int *sData, int *oData, int x, int y){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < x*y; i += stride) { oData[i] = fData[i] + sData[i]; } } __global__ void sub(int *fData, int *sData, int *oData, int x, int y){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < x * y; i += stride) { oData[i] = fData[i] - sData[i]; } } __global__ void mul(CudaObject a,CudaObject b, CudaObject c) { int cutRow = blockIdx.y ; int cutCol = blockIdx.x; int fRow = blockIdx.y * blockDim.y + threadIdx.y; int fCol = blockIdx.x * blockDim.x + threadIdx.x; int row = threadIdx.y; int col = threadIdx.x; int temp = 0; CudaObject cutMatC = c.cutMatrix(cutRow, cutCol); for( int v = 0; v < ((a.size_y + BLOCK_SIZE - 1)/BLOCK_SIZE); ++v){ CudaObject cutMatA = a.cutMatrix(cutRow, v); //cut input matrix vector which can fit inside block CudaObject cutMatB = b.cutMatrix(v, cutCol); __shared__ int 
A[BLOCK_SIZE][BLOCK_SIZE]; //Matrix wchich can share memory between threads __shared__ int B[BLOCK_SIZE][BLOCK_SIZE]; if((row < a.size_x) && ((col + v * BLOCK_SIZE) < a.size_y)){ A[row][col] = cutMatA.getElement(row, col); } else{ A[row][col] = 0; } if((col < b.size_y) && ((row + v * BLOCK_SIZE) < b.size_x)){ B[row][col] = cutMatB.getElement(row, col); } else{ B[row][col] = 0; } __syncthreads(); //make sure that every metrix is filled for (int i = 0; i < BLOCK_SIZE; ++i){ temp += A[row][i] * B[i][col]; } __syncthreads(); } if(fRow < c.size_x && fCol < c.size_y) c.setElement(fRow, fCol, temp); } __global__ void tran(CudaObject iData, CudaObject oData){ int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int x = iData.size_x; int y = iData.size_y; for(int n = index; n < x * y; n += stride){ int i = n/x; int j = n%x; oData.data[n] = iData.data[y * j + i]; } } void OperationsInfo() { std::cout<<"Choose an operation:"<<std::endl; std::cout<<"1. Matrix addition on CPU"<<std::endl; std::cout<<"2. Matrix addition on GPU"<<std::endl; std::cout<<"3. Matrix substraction on CPU"<<std::endl; std::cout<<"4. Matrix substraction on GPU"<<std::endl; std::cout<<"5. Matrix multiplication on CPU"<<std::endl; std::cout<<"6. Matrix multiplication on GPU"<<std::endl; std::cout<<"7. Matrix transposition on CPU"<<std::endl; std::cout<<"8. 
Matrix transposition on GPU"<<std::endl; } void Init(CudaObject &oData, int val) { int x = oData.size_x; int y = oData.size_y; for(int i = 0; i < y; i++){ for(int j = 0; j<x; j++){ oData.data[i*x+j] = val; } } } int main(){ int operation; int N_1, N_2, M_1, M_2; int val_1, val_2; std::cout<<"Enter the values of size_x, size_y of the first matrix and value to filled matrix:"<<std::endl; std::cin>>N_1; std::cin>>M_1; std::cin>>val_1; std::cout<<"Enter the values of size_x, size_y of the second matrix and value to filled matrix:"<<std::endl; std::cin>>N_2; std::cin>>M_2; std::cin>>val_2; CudaObject fData(N_1, M_1, M_1), sData(N_2, M_2, M_2), oData(N_1, M_2, M_2); cudaMallocManaged(&fData.data,N_1 * M_1 * sizeof(int)); cudaMallocManaged(&sData.data, N_2 * M_2 * sizeof(int)); cudaMallocManaged(&oData.data, N_1 * M_2 * sizeof(int)); Init(fData,val_1); Init(sData,val_2); fData.writeOut(); sData.writeOut(); OperationsInfo(); std::cin>>operation; dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 blocksPerGrid((fData.size_y + threadsPerBlock.x - 1)/threadsPerBlock.x, (sData.size_x + threadsPerBlock.y - 1)/threadsPerBlock.y); switch(operation) { case 1: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ oData.addCpu(fData,sData); oData.writeOut(); } break; case 2: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ add<<<blocksPerGrid,threadsPerBlock>>>(fData.data, sData.data, oData.data, oData.size_x, oData.size_y); cudaDeviceSynchronize(); oData.writeOut(); } break; case 3: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ oData.subCpu(fData,sData); oData.writeOut(); } break; case 4: if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){ std::cout<<"Matrices sizes have to be equal!"<<std::endl; } else{ 
sub<<<blocksPerGrid,threadsPerBlock>>>(fData.data, sData.data, oData.data, oData.size_x, oData.size_y); cudaDeviceSynchronize(); oData.writeOut(); } break; case 5: if(fData.size_y != sData.size_x){ std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl; } else{ oData.mulCpu(fData,sData); oData.writeOut(); } break; case 6: if(fData.size_y != sData.size_x){ std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl; } else{ mul<<<blocksPerGrid, threadsPerBlock>>>(fData, sData, oData); cudaDeviceSynchronize(); oData.writeOut(); } break; case 7: std::cout<<"Transposition of the first matrix: "<<std::endl; oData.tranCpu(fData); oData.writeOut(); std::cout<<"Transposition of the second matrix: "<<std::endl; oData.tranCpu(sData); oData.writeOut(); break; case 8: std::cout<<"Transposition of the first matrix: "<<std::endl; oData.setSize(M_1,N_1); tran<<<blocksPerGrid, threadsPerBlock>>>(fData, oData); cudaDeviceSynchronize(); oData.writeOut(); std::cout<<"Transposition of the second matrix: "<<std::endl; oData.setSize(M_2,N_2); tran<<<blocksPerGrid, threadsPerBlock>>>(sData, oData); cudaDeviceSynchronize(); oData.writeOut(); break; default: std::cout<<"Wrong number entered!"<<std::endl; break; } cudaError_t err = cudaSuccess; if (err != cudaSuccess){ fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaFree(fData.data); cudaFree(sData.data); cudaFree(oData.data); }
a7b518e88d8df02680b39e08953ff4aeb93d8e8a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #define inf 9999 #define N 1000 int main(int argc, char **argv) { struct timeval first, second, lapsed, third; struct timezone tzp, tzp2; float *host_A; int *host_Q; float *dev_x; int *dev_qx; float *A; int *Q; float *D; int i, j, bk; int k = 0; //int n = atoi(argv[1]); int n = N; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; printf("\n"); printf("RUNNING WITH %d VERTICES \n", n); printf("\n"); hipMalloc(&dev_x, n * n * sizeof (float)); hipMalloc(&dev_qx, n * n * sizeof (float)); //CPU arrays A = (float *) malloc(n * n * sizeof (float)); //arxikos pinakas A D = (float *) malloc(n * n * sizeof (float)); //arxikos pinakas D Q = (int *) malloc(n * n * sizeof (int)); //arxikos pinakas Q //GPU arrays host_A = (float *) malloc(n * n * sizeof (float)); host_Q = (int *) malloc(n * n * sizeof (int)); srand(time(NULL)); gettimeofday(&third, &tzp2); ////////////////////////////First Mem Copy//////////////////// gettimeofday(&first, &tzp); hipMemcpy(dev_x, host_A, n * n * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_qx, host_Q, n * n * sizeof (int), hipMemcpyHostToDevice); gettimeofday(&second, &tzp); if (first.tv_usec > second.tv_usec) { second.tv_usec += 1000000; second.tv_sec--; } lapsed.tv_usec = second.tv_usec - first.tv_usec; lapsed.tv_sec = second.tv_sec - first.tv_sec; printf("First Transfer CPU to GPU Time elapsed: %lu, %lu s\n", lapsed.tv_sec, lapsed.tv_usec); ////////////////////////////////////////////////////GPU Calculation//////////////////////////////// bk = (int) (n * n / 512); int gputhreads = 512; if (bk > 0) { gputhreads = 512; } else { bk = 1; gputhreads = n*n; } printf(" \n"); printf("BLOCKS : %d GPU THREADS: %d \n", bk, gputhreads); printf(" \n"); //gettimeofday(&first, &tzp); hipEventRecord(start); funct << <bk, gputhreads>>>(n, k, dev_x, 
dev_qx); hipDeviceSynchronize(); //gettimeofday(&second, &tzp); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); if (first.tv_usec > second.tv_usec) { second.tv_usec += 1000000; second.tv_sec--; } lapsed.tv_usec = second.tv_usec - first.tv_usec; lapsed.tv_sec = second.tv_sec - first.tv_sec; printf("GPU Calculation Time elapsed: %.20f\n", milliseconds * .0001); printf("\n"); printf("ALL OK WE ARE DONE \n"); return 0; }
a7b518e88d8df02680b39e08953ff4aeb93d8e8a.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #define inf 9999 #define N 1000 int main(int argc, char **argv) { struct timeval first, second, lapsed, third; struct timezone tzp, tzp2; float *host_A; int *host_Q; float *dev_x; int *dev_qx; float *A; int *Q; float *D; int i, j, bk; int k = 0; //int n = atoi(argv[1]); int n = N; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; printf("\n"); printf("RUNNING WITH %d VERTICES \n", n); printf("\n"); cudaMalloc(&dev_x, n * n * sizeof (float)); cudaMalloc(&dev_qx, n * n * sizeof (float)); //CPU arrays A = (float *) malloc(n * n * sizeof (float)); //arxikos pinakas A D = (float *) malloc(n * n * sizeof (float)); //arxikos pinakas D Q = (int *) malloc(n * n * sizeof (int)); //arxikos pinakas Q //GPU arrays host_A = (float *) malloc(n * n * sizeof (float)); host_Q = (int *) malloc(n * n * sizeof (int)); srand(time(NULL)); gettimeofday(&third, &tzp2); ////////////////////////////First Mem Copy//////////////////// gettimeofday(&first, &tzp); cudaMemcpy(dev_x, host_A, n * n * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_qx, host_Q, n * n * sizeof (int), cudaMemcpyHostToDevice); gettimeofday(&second, &tzp); if (first.tv_usec > second.tv_usec) { second.tv_usec += 1000000; second.tv_sec--; } lapsed.tv_usec = second.tv_usec - first.tv_usec; lapsed.tv_sec = second.tv_sec - first.tv_sec; printf("First Transfer CPU to GPU Time elapsed: %lu, %lu s\n", lapsed.tv_sec, lapsed.tv_usec); ////////////////////////////////////////////////////GPU Calculation//////////////////////////////// bk = (int) (n * n / 512); int gputhreads = 512; if (bk > 0) { gputhreads = 512; } else { bk = 1; gputhreads = n*n; } printf(" \n"); printf("BLOCKS : %d GPU THREADS: %d \n", bk, gputhreads); printf(" \n"); //gettimeofday(&first, &tzp); cudaEventRecord(start); funct << <bk, gputhreads>>>(n, k, dev_x, dev_qx); cudaThreadSynchronize(); 
//gettimeofday(&second, &tzp); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); if (first.tv_usec > second.tv_usec) { second.tv_usec += 1000000; second.tv_sec--; } lapsed.tv_usec = second.tv_usec - first.tv_usec; lapsed.tv_sec = second.tv_sec - first.tv_sec; printf("GPU Calculation Time elapsed: %.20f\n", milliseconds * .0001); printf("\n"); printf("ALL OK WE ARE DONE \n"); return 0; }
aa2e50e86beaac10eddac41f4e449bff1eb6417f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016. The Regents of the University of California (Regents). All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research, not-for-profit, and commercial purposes (such rights not subject to transfer), without fee, and without a signed licensing agreement, is hereby granted, provi ded that the above copyright notice, this paragraph and the following two paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology Licensi ng, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, for commercial licensing opportunities. Yang Gao, University of California, Berkeley. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMP ANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
*/ #include <algorithm> #include <vector> #include "caffe/layers/compact_bilinear_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #define CHECK_CUFFT(X) CHECK_EQ((X), HIPFFT_SUCCESS) // overloaded functions, to support float and double hipblasStatus_t cublasgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const float *alpha, const float *A, int lda, const float *beta, const float *B, int ldb, float *C, int ldc) { return hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } hipblasStatus_t cublasgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const double *alpha, const double *A, int lda, const double *beta, const double *B, int ldb, double *C, int ldc) { return hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } // caffe wrapper of transpose function // dst=src^T, with the src size being M*N template<typename Dtype> void caffe_gpu_transpose(int M, const int N, const Dtype* src, Dtype* dst) { CHECK(src != dst) << "support out of place transpose only"; Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, M, N, &alpha, src, N, &beta, dst, M, dst, M), HIPBLAS_STATUS_SUCCESS); } template<typename Dtype> void transpose_batch(const int batchlen, const int M, const int N, const Dtype* src, Dtype* dst) { const int step = M * N; for (int ins = 0; ins < batchlen; ++ins) caffe_gpu_transpose(M, N, src + ins * step, dst + ins * step); } // wrappers to deal with atomic add of double __device__ void caffe_atomic_add(float* dst, float val) { atomicAdd(dst, val); } __device__ void caffe_atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) 
address; // NOLINT_NEXT_LINE(runtime/int) unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } // do the getCount and do transpose along the way // should clear top to 0 before call template<typename Dtype> __global__ void GPUCountAndTranspose(const int nthreads, const int * hh, const Dtype * ss, const Dtype* bottom, Dtype* top, const int hw, const int C, const int num_output_) { // input batchlen*C*hw // output batchlen*hw*num_output, the transpose of the original output CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of things you need to do // index is the current INPUT point to be computed int left = index % (C * hw); const int ibatch = index / (C * hw); const int ic = left / hw; const int ihw = left % hw; // get the target location const int target = ibatch * (hw * num_output_) + ihw * num_output_ + hh[ic]; // atomic add only supports float not double caffe_atomic_add(top + target, ss[ic] * bottom[index]); } } // some wrappers around cufftExec // float forward hipfftResult cufftExec(hipfftHandle plan, const float *idata, CaffeComplex<float> *odata) { return hipfftExecR2C(plan, reinterpret_cast<hipfftReal*>(const_cast<float*>(idata)), reinterpret_cast<hipfftComplex*>(odata)); } // double forward hipfftResult cufftExec(hipfftHandle plan, const double *idata, CaffeComplex<double> *odata) { return hipfftExecD2Z(plan, reinterpret_cast<hipfftDoubleReal*>(const_cast<double*>(idata)), reinterpret_cast<hipfftDoubleComplex*>(odata)); } // float inverse hipfftResult cufftExec(hipfftHandle plan, const CaffeComplex<float> *idata, float *odata) { return hipfftExecC2R(plan, reinterpret_cast<hipfftComplex*>( const_cast<CaffeComplex<float>*>(idata)), reinterpret_cast<hipfftReal*>(odata)); } // double inverse hipfftResult 
cufftExec(hipfftHandle plan, const CaffeComplex<double> *idata, double *odata) { return hipfftExecZ2D(plan, reinterpret_cast<hipfftDoubleComplex*>( const_cast<CaffeComplex<double>*>(idata)), reinterpret_cast<hipfftDoubleReal*>(odata)); } // call cufft to do batch*nffts // hipfftReal* src; hipfftComplex *output template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_fft(const int batchlen, const int hw, const int nfft, const Dtype* src, CaffeComplex<Dtype>* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_noinv_batch, src, output)); } else { const int step_in = hw * nfft; const int step_out = hw * (floor(1.0 * nfft / 2) + 1); for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_noinv_1, src + step_in * i, output + step_out * i)); } } } template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_ifft(const int batchlen, const int hw, const int nfft, const CaffeComplex<Dtype>* src, Dtype* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_inv_batch, src, output)); } else { const int step_in = hw * (floor(1.0 * nfft / 2) + 1); const int step_out = hw * nfft; for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_inv_1, src + step_in * i, output + step_out * i)); } } } // Complex multiplication template<typename Dtype> static __device__ __host__ inline CaffeComplex<Dtype> ComplexMul( const CaffeComplex<Dtype> &a, const CaffeComplex<Dtype> &b) { CaffeComplex<Dtype> c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // entrywise multiplication: y[i]=a[i]*b[i] template<typename Dtype> __global__ void complexMul(const int nthreads, const CaffeComplex<Dtype>* a, const CaffeComplex<Dtype>* b, CaffeComplex<Dtype>* y) { CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of entries y[index] = ComplexMul(a[index], b[index]); } } // dispatchers hipblasStatus_t cublasgemv(hipblasHandle_t handle, hipblasOperation_t trans, int m, int n, const float *alpha, const float *A, 
int lda, const float *x, int incx, const float *beta, float *y, int incy) { return hipblasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } hipblasStatus_t cublasgemv(hipblasHandle_t handle, hipblasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return hipblasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } // sum the columns of a M*N source matrix and store it to dst template<typename Dtype> void caffe_sum_cols(const int M, const int N, const Dtype* src, Dtype* dst, Dtype* ones_hw) { Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgemv(Caffe::cublas_handle(), HIPBLAS_OP_T, N, M, &alpha, src, N, ones_hw, 1, &beta, dst, 1), HIPBLAS_STATUS_SUCCESS); } template<> void CompactBilinearLayer<float>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(hipfftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_C2R, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_C2R, hw)); } template<> void CompactBilinearLayer<double>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(hipfftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_D2Z, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_D2Z, hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_Z2D, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_Z2D, hw)); } template<typename Dtype> void CompactBilinearLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& 
top) { const int hw = bottom[0]->count(2); if (!plan_init) { // some init commands that will only be executed once plan_init = true; Initializations(hw); // get an all one vector CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&ones_hw), sizeof(Dtype) * hw)); caffe_gpu_set(hw, Dtype(1.0), ones_hw); } // memory pointer short hand Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; const int step_top = top[0]->count(1); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; // temporary space allocation Dtype* batchSpace[2]; CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&batchSpace[ipoly]), batchsz * num_output_ * hw * sizeof(Dtype))); CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype>))); } // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); for (int ipoly = 0; ipoly < 2; ++ipoly) { // some short hands Dtype* space = batchSpace[ipoly]; const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), space); // first get count and transpose nthreads = batchlen * step_bottom[ipoly]; GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], space, hw, C[ipoly], num_output_); // now space is batchlen*hw*num_output // then do FFT caffe_gpu_fft(batchlen, hw, num_output_, space, fftSpace[ipoly]); } // entry-wise multiplication int nthreads = batchlen 
* hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, fftSpace[0], fftSpace[1], fftSpace[0]); // ifft caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[0], batchSpace[0]); // transpose back Dtype* out_target; if (sum_pool_) out_target = batchSpace[1]; else out_target = top_data + batchStart * step_top; transpose_batch(batchlen, hw, num_output_, batchSpace[0], out_target); if (sum_pool_) caffe_sum_cols(batchlen * num_output_, hw, out_target, top_data + batchStart * step_top, ones_hw); } // temporary space destroy for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(hipFree(batchSpace[ipoly])); CUDA_CHECK(hipFree(fftSpace[ipoly])); } } template<typename Dtype> __global__ void copy_and_transpose(const int nthreads, const int batch, const int num_output_, const int hw, const Dtype* src, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batch*num_output_ // dst size: batch*hw*num_output_ // index over dst const int left = index % (hw * num_output_); const int ibatch = index / (hw * num_output_); const int ihw = left / num_output_; const int iout = left % num_output_; dst[index] = src[ibatch * num_output_ + iout]; } } // C, dst, hh and ss are complement template<typename Dtype> __global__ void assign_back(const int nthreads, const Dtype* src, Dtype* dst, const int* hh, const Dtype* ss, const int batchlen, const int C, const int hw, const int num_output_) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batchlen*hw*num_output // dst size: batchlen*C*hw // index over dst const int left = index % (hw * C); const int ibatch = index / (hw * C); const int ic = left / hw; const int ihw = left % hw; dst[index] += ss[ic] * src[(ibatch * hw + ihw) * num_output_ + hh[ic]]; } } template<typename Dtype> __device__ void caffe_gpu_swap(Dtype* a, Dtype* b) { if (a == b) return; Dtype t = *a; *a = *b; *b = t; } template<typename Dtype> 
__global__ void fliplr(const int nthreads, Dtype* src, const int M, const int N) { CUDA_KERNEL_LOOP(index, nthreads) { // src & dst are M*N // flip left right, loop over src const int m = index / N; const int n = index % N; if ((n <= (N / 2)) && (n >= 1)) caffe_gpu_swap(src + index, src + index - n + N - n); } } template<typename Dtype> void CompactBilinearLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if ((!propagate_down[0]) && (!propagate_down[1])) return; // process the same bottom case // when the two bottoms are the same, one propagate down requires the other vector<bool> pd = propagate_down; if (bottom[0] == bottom[1]) pd[0] = pd[1] = true; // memory pointer short hand const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; Dtype* bottom_diff[2] = { bottom[0]->mutable_gpu_diff(), bottom[1] ->mutable_gpu_diff() }; for (int i = 0; i < 2; ++i) caffe_gpu_set(bottom[i]->count(), Dtype(0.0), bottom_diff[i]); const Dtype* top_diff = top[0]->gpu_diff(); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int step_top = top[0]->count(1); const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; const int hw = bottom[0]->count(2); // the pointer to the (repeated) derivative Dtype* dzdy; CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&dzdy), batchsz * num_output_ * hw * sizeof(Dtype))); // fft[0] for derivative, fft[1] for data CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype> ))); // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); // (copy and) transpose the derivative if (sum_pool_) { int nthreads = batchlen * hw * 
num_output_; // copy and transpose the derivative copy_and_transpose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } else { // transpose the derivative transpose_batch<Dtype>(batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } // fft the derivative, stored in fftSpace[0] caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[0]); for (int ipoly = 0; ipoly < 2; ++ipoly) if (pd[1 - ipoly]) { // some short hands const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; // first get count and transpose, reuse the dzdy space nthreads = batchlen * step_bottom[ipoly]; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), dzdy); GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], dzdy, hw, C[ipoly], num_output_); // now dzdy is batchlen*hw*num_output_ // fliplr(:, 2:end) nthreads = batchlen * hw * num_output_; fliplr<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, dzdy, batchlen * hw, num_output_); // fft data caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[1]); // elementwise mul nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, fftSpace[0], fftSpace[1], fftSpace[1]); // ifft, again reuse dzdy caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[1], dzdy); // complement projection nthreads = batchlen * hw * C[1 - ipoly]; assign_back<Dtype> // 
NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, dzdy, bottom_diff[1-ipoly] + batchStart * step_bottom[1-ipoly], randh_[1-ipoly].gpu_data(), rands_[1-ipoly].gpu_data(), batchlen, C[1-ipoly], hw, num_output_); } } // temporary space destroy CUDA_CHECK(hipFree(dzdy)); CUDA_CHECK(hipFree(fftSpace[0])); CUDA_CHECK(hipFree(fftSpace[1])); } INSTANTIATE_LAYER_GPU_FUNCS(CompactBilinearLayer); } // namespace caffe
aa2e50e86beaac10eddac41f4e449bff1eb6417f.cu
/* Copyright ©2016. The Regents of the University of California (Regents). All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research, not-for-profit, and commercial purposes (such rights not subject to transfer), without fee, and without a signed licensing agreement, is hereby granted, provi ded that the above copyright notice, this paragraph and the following two paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology Licensi ng, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, for commercial licensing opportunities. Yang Gao, University of California, Berkeley. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMP ANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
*/ #include <algorithm> #include <vector> #include "caffe/layers/compact_bilinear_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #define CHECK_CUFFT(X) CHECK_EQ((X), CUFFT_SUCCESS) // overloaded functions, to support float and double cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float *alpha, const float *A, int lda, const float *beta, const float *B, int ldb, float *C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double *alpha, const double *A, int lda, const double *beta, const double *B, int ldb, double *C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } // caffe wrapper of transpose function // dst=src^T, with the src size being M*N template<typename Dtype> void caffe_gpu_transpose(int M, const int N, const Dtype* src, Dtype* dst) { CHECK(src != dst) << "support out of place transpose only"; Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, M, N, &alpha, src, N, &beta, dst, M, dst, M), CUBLAS_STATUS_SUCCESS); } template<typename Dtype> void transpose_batch(const int batchlen, const int M, const int N, const Dtype* src, Dtype* dst) { const int step = M * N; for (int ins = 0; ins < batchlen; ++ins) caffe_gpu_transpose(M, N, src + ins * step, dst + ins * step); } // wrappers to deal with atomic add of double __device__ void caffe_atomic_add(float* dst, float val) { atomicAdd(dst, val); } __device__ void caffe_atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) address; // 
NOLINT_NEXT_LINE(runtime/int) unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } // do the getCount and do transpose along the way // should clear top to 0 before call template<typename Dtype> __global__ void GPUCountAndTranspose(const int nthreads, const int * hh, const Dtype * ss, const Dtype* bottom, Dtype* top, const int hw, const int C, const int num_output_) { // input batchlen*C*hw // output batchlen*hw*num_output, the transpose of the original output CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of things you need to do // index is the current INPUT point to be computed int left = index % (C * hw); const int ibatch = index / (C * hw); const int ic = left / hw; const int ihw = left % hw; // get the target location const int target = ibatch * (hw * num_output_) + ihw * num_output_ + hh[ic]; // atomic add only supports float not double caffe_atomic_add(top + target, ss[ic] * bottom[index]); } } // some wrappers around cufftExec // float forward cufftResult cufftExec(cufftHandle plan, const float *idata, CaffeComplex<float> *odata) { return cufftExecR2C(plan, reinterpret_cast<cufftReal*>(const_cast<float*>(idata)), reinterpret_cast<cufftComplex*>(odata)); } // double forward cufftResult cufftExec(cufftHandle plan, const double *idata, CaffeComplex<double> *odata) { return cufftExecD2Z(plan, reinterpret_cast<cufftDoubleReal*>(const_cast<double*>(idata)), reinterpret_cast<cufftDoubleComplex*>(odata)); } // float inverse cufftResult cufftExec(cufftHandle plan, const CaffeComplex<float> *idata, float *odata) { return cufftExecC2R(plan, reinterpret_cast<cufftComplex*>( const_cast<CaffeComplex<float>*>(idata)), reinterpret_cast<cufftReal*>(odata)); } // double inverse cufftResult cufftExec(cufftHandle plan, const 
CaffeComplex<double> *idata, double *odata) { return cufftExecZ2D(plan, reinterpret_cast<cufftDoubleComplex*>( const_cast<CaffeComplex<double>*>(idata)), reinterpret_cast<cufftDoubleReal*>(odata)); } // call cufft to do batch*nffts // cufftReal* src; cufftComplex *output template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_fft(const int batchlen, const int hw, const int nfft, const Dtype* src, CaffeComplex<Dtype>* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_noinv_batch, src, output)); } else { const int step_in = hw * nfft; const int step_out = hw * (floor(1.0 * nfft / 2) + 1); for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_noinv_1, src + step_in * i, output + step_out * i)); } } } template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_ifft(const int batchlen, const int hw, const int nfft, const CaffeComplex<Dtype>* src, Dtype* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_inv_batch, src, output)); } else { const int step_in = hw * (floor(1.0 * nfft / 2) + 1); const int step_out = hw * nfft; for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_inv_1, src + step_in * i, output + step_out * i)); } } } // Complex multiplication template<typename Dtype> static __device__ __host__ inline CaffeComplex<Dtype> ComplexMul( const CaffeComplex<Dtype> &a, const CaffeComplex<Dtype> &b) { CaffeComplex<Dtype> c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // entrywise multiplication: y[i]=a[i]*b[i] template<typename Dtype> __global__ void complexMul(const int nthreads, const CaffeComplex<Dtype>* a, const CaffeComplex<Dtype>* b, CaffeComplex<Dtype>* y) { CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of entries y[index] = ComplexMul(a[index], b[index]); } } // dispatchers cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const 
float *beta, float *y, int incy) { return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } // sum the columns of a M*N source matrix and store it to dst template<typename Dtype> void caffe_sum_cols(const int M, const int N, const Dtype* src, Dtype* dst, Dtype* ones_hw) { Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgemv(Caffe::cublas_handle(), CUBLAS_OP_T, N, M, &alpha, src, N, ones_hw, 1, &beta, dst, 1), CUBLAS_STATUS_SUCCESS); } template<> void CompactBilinearLayer<float>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(cufftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_C2R, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_C2R, hw)); } template<> void CompactBilinearLayer<double>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(cufftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_D2Z, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_D2Z, hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_Z2D, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_Z2D, hw)); } template<typename Dtype> void CompactBilinearLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int hw = bottom[0]->count(2); if (!plan_init) { // 
some init commands that will only be executed once plan_init = true; Initializations(hw); // get an all one vector CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&ones_hw), sizeof(Dtype) * hw)); caffe_gpu_set(hw, Dtype(1.0), ones_hw); } // memory pointer short hand Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; const int step_top = top[0]->count(1); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; // temporary space allocation Dtype* batchSpace[2]; CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&batchSpace[ipoly]), batchsz * num_output_ * hw * sizeof(Dtype))); CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype>))); } // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); for (int ipoly = 0; ipoly < 2; ++ipoly) { // some short hands Dtype* space = batchSpace[ipoly]; const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), space); // first get count and transpose nthreads = batchlen * step_bottom[ipoly]; GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], space, hw, C[ipoly], num_output_); // now space is batchlen*hw*num_output // then do FFT caffe_gpu_fft(batchlen, hw, num_output_, space, fftSpace[ipoly]); } // entry-wise multiplication int nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, fftSpace[0], fftSpace[1], fftSpace[0]); // ifft caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[0], batchSpace[0]); // transpose back Dtype* out_target; if (sum_pool_) out_target = batchSpace[1]; else out_target = top_data + batchStart * step_top; transpose_batch(batchlen, hw, num_output_, batchSpace[0], out_target); if (sum_pool_) caffe_sum_cols(batchlen * num_output_, hw, out_target, top_data + batchStart * step_top, ones_hw); } // temporary space destroy for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(cudaFree(batchSpace[ipoly])); CUDA_CHECK(cudaFree(fftSpace[ipoly])); } } template<typename Dtype> __global__ void copy_and_transpose(const int nthreads, const int batch, const int num_output_, const int hw, const Dtype* src, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batch*num_output_ // dst size: batch*hw*num_output_ // index over dst const int left = index % (hw * num_output_); const int ibatch = index / (hw * num_output_); const int ihw = left / num_output_; const int iout = left % num_output_; dst[index] = src[ibatch * num_output_ + iout]; } } // C, dst, hh and ss are complement template<typename Dtype> __global__ void assign_back(const int nthreads, const Dtype* src, Dtype* dst, const int* hh, const Dtype* ss, const int batchlen, const int C, const int hw, const int num_output_) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batchlen*hw*num_output // dst size: batchlen*C*hw // index over dst const int left = index % (hw * C); const int ibatch = index / (hw * C); const int ic = left / hw; const int ihw = left % hw; dst[index] += ss[ic] * src[(ibatch * hw + ihw) * num_output_ + hh[ic]]; } } template<typename Dtype> __device__ void caffe_gpu_swap(Dtype* a, Dtype* b) { if (a == b) return; Dtype t = *a; *a = *b; *b = t; } template<typename Dtype> __global__ void fliplr(const int nthreads, Dtype* src, const int M, const int N) { CUDA_KERNEL_LOOP(index, nthreads) { // 
src & dst are M*N // flip left right, loop over src const int m = index / N; const int n = index % N; if ((n <= (N / 2)) && (n >= 1)) caffe_gpu_swap(src + index, src + index - n + N - n); } } template<typename Dtype> void CompactBilinearLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if ((!propagate_down[0]) && (!propagate_down[1])) return; // process the same bottom case // when the two bottoms are the same, one propagate down requires the other vector<bool> pd = propagate_down; if (bottom[0] == bottom[1]) pd[0] = pd[1] = true; // memory pointer short hand const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; Dtype* bottom_diff[2] = { bottom[0]->mutable_gpu_diff(), bottom[1] ->mutable_gpu_diff() }; for (int i = 0; i < 2; ++i) caffe_gpu_set(bottom[i]->count(), Dtype(0.0), bottom_diff[i]); const Dtype* top_diff = top[0]->gpu_diff(); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int step_top = top[0]->count(1); const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; const int hw = bottom[0]->count(2); // the pointer to the (repeated) derivative Dtype* dzdy; CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&dzdy), batchsz * num_output_ * hw * sizeof(Dtype))); // fft[0] for derivative, fft[1] for data CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype> ))); // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); // (copy and) transpose the derivative if (sum_pool_) { int nthreads = batchlen * hw * num_output_; // copy and transpose the derivative copy_and_transpose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } else { // transpose the derivative transpose_batch<Dtype>(batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } // fft the derivative, stored in fftSpace[0] caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[0]); for (int ipoly = 0; ipoly < 2; ++ipoly) if (pd[1 - ipoly]) { // some short hands const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; // first get count and transpose, reuse the dzdy space nthreads = batchlen * step_bottom[ipoly]; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), dzdy); GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], dzdy, hw, C[ipoly], num_output_); // now dzdy is batchlen*hw*num_output_ // fliplr(:, 2:end) nthreads = batchlen * hw * num_output_; fliplr<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, dzdy, batchlen * hw, num_output_); // fft data caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[1]); // elementwise mul nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, fftSpace[0], fftSpace[1], fftSpace[1]); // ifft, again reuse dzdy caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[1], dzdy); // complement projection nthreads = batchlen * hw * C[1 - ipoly]; assign_back<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, dzdy, bottom_diff[1-ipoly] + batchStart * step_bottom[1-ipoly], randh_[1-ipoly].gpu_data(), rands_[1-ipoly].gpu_data(), batchlen, C[1-ipoly], hw, num_output_); } } // temporary space destroy 
CUDA_CHECK(cudaFree(dzdy)); CUDA_CHECK(cudaFree(fftSpace[0])); CUDA_CHECK(cudaFree(fftSpace[1])); } INSTANTIATE_LAYER_GPU_FUNCS(CompactBilinearLayer); } // namespace caffe
5f9e1d7f191e4a7f4202789b38a4d65a048b6855.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "util.hpp" void benchmark_gpu(thrust::host_vector<double> values_host) { // fill a vector with random values size_t n = values_host.size(); thrust::device_vector<double> values_device(n); auto start = get_time(); // TODO: copy values to device values_device = values_host; auto h2d_time = get_time() - start; // TODO: sort values on device thrust::sort(values_device.begin(), values_device.end()); auto sort_time = get_time() - h2d_time; // TODO: copy result back to host values_host = values_device; auto time_taken = get_time() - start; std::cout << "gpu performance including transfers: " << n / time_taken / 1e6 << " million keys/s\n"; std::cout << "gpu performance without transfers: " << n / sort_time / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "gpu sort: " << (pass ? "passed\n\n" : "failed\n\n"); } void benchmark_host(thrust::host_vector<double> values_host) { size_t n = values_host.size(); auto start = get_time(); // sort values on host std::sort(values_host.begin(), values_host.end()); auto time_taken = get_time(); std::cout << "host performance: " << n / time_taken / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "host sort: " << (pass ? "passed\n\n" : "failed\n\n"); } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); std::cout << "sort test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) 
<< "MB" << std::endl << std::endl; // fill a vector with random values thrust::host_vector<double> values_host(n); std::generate(values_host.begin(), values_host.end(), drand48); // start the nvprof profiling hipProfilerStart(); benchmark_gpu(values_host); benchmark_host(values_host); // stop the profiling session hipProfilerStop(); return 0; }
5f9e1d7f191e4a7f4202789b38a4d65a048b6855.cu
#include <algorithm> #include <iostream> #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include "util.hpp" void benchmark_gpu(thrust::host_vector<double> values_host) { // fill a vector with random values size_t n = values_host.size(); thrust::device_vector<double> values_device(n); auto start = get_time(); // TODO: copy values to device values_device = values_host; auto h2d_time = get_time() - start; // TODO: sort values on device thrust::sort(values_device.begin(), values_device.end()); auto sort_time = get_time() - h2d_time; // TODO: copy result back to host values_host = values_device; auto time_taken = get_time() - start; std::cout << "gpu performance including transfers: " << n / time_taken / 1e6 << " million keys/s\n"; std::cout << "gpu performance without transfers: " << n / sort_time / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "gpu sort: " << (pass ? "passed\n\n" : "failed\n\n"); } void benchmark_host(thrust::host_vector<double> values_host) { size_t n = values_host.size(); auto start = get_time(); // sort values on host std::sort(values_host.begin(), values_host.end()); auto time_taken = get_time(); std::cout << "host performance: " << n / time_taken / 1e6 << " million keys/s\n"; // check for errors bool pass = std::is_sorted(values_host.begin(), values_host.end()); std::cout << "host sort: " << (pass ? "passed\n\n" : "failed\n\n"); } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); std::cout << "sort test of length n = " << n << " : " << size_in_bytes/(1024.*1024.) 
<< "MB" << std::endl << std::endl; // fill a vector with random values thrust::host_vector<double> values_host(n); std::generate(values_host.begin(), values_host.end(), drand48); // start the nvprof profiling cudaProfilerStart(); benchmark_gpu(values_host); benchmark_host(values_host); // stop the profiling session cudaProfilerStop(); return 0; }
312985c6f68da60241a462568fe3a4bb180f54f6.hip
// !!! This is a file automatically generated by hipify!!!
// Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <hip/hip_runtime.h>
// #include <rocblas.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "caffe/3rdparty/detail/ctc_helper.h"
#include "caffe/3rdparty/ctc.h"

// Number of lanes the shuffle-based tail of CTAReduce assumes per warp.
// NOTE(review): hard-coded to 32, but AMD GCN wavefronts are 64 lanes wide,
// and this hipified file uses __shfl_down_sync -- confirm both against the
// ROCm versions/targets this is built for.
const int warp_size = 32;

template<int NT, typename T, typename Rop>
struct CTAReduce;

// Block-wide (CTA) reduction of one value per thread with binary operator
// Rop, for a block of exactly NT threads. Shared memory is folded in half
// each pass down to warp_size partials, then a single warp finishes the
// reduction with register shuffles.
template<int NT, typename T, typename Rop>
struct CTAReduce {
    enum { Size = NT, Capacity = NT };
    struct Storage { T shared[Capacity]; };

    // Reduce x across the block; only threads with tid < count contribute.
    // Must be reached by all NT threads (contains __syncthreads()); callers
    // below read the final value from thread 0 only.
    __device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
        T* s = storage.shared;
        s[tid] = x;
        __syncthreads();

        // Fold the data in half with each pass.
#pragma unroll
        for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
            // `tid + offset < count` keeps lanes beyond the valid element
            // count (whose x is uninitialized) out of the result.
            if(tid + offset < count && tid < offset) {
                // Read from the right half and store to the left half.
                x = g(x, s[offset + tid]);
                s[tid] = x;
            }
            __syncthreads();
        }

        // Final warp_size partials: finish within one warp using shuffles,
        // no shared memory or block barrier required.
        T shuff;
        for (int offset = warp_size / 2; offset > 0; offset /= 2) {
            shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
            if (tid + offset < count && tid < offset)
                x = g(x, shuff);
        }
        return x;
    }
};

// Column-wise reduction: one block per column of the num_rows x num_cols
// column-major matrix. Each thread strides down the column applying the
// elementwise functor f, partials are combined with g via CTAReduce, and
// thread 0 writes output[col].
// NOTE(review): `curr` stays uninitialized in threads with tid >= num_rows;
// CTAReduce's `count` guard is what keeps those lanes out of the result.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    typedef CTAReduce<NT, T, Rop> R;
    __shared__ typename R::Storage storage;
    int tid = threadIdx.x;
    int idx = tid;
    int col = blockIdx.x;
    T curr;

    // Each block works on a column
    if (idx < num_rows)
        curr = f(input[idx + col*num_rows]);
    idx += NT;

    while (idx < num_rows) {
        curr = g(curr, f(input[idx + col*num_rows]));
        idx += NT;
    }

    // Sum thread-totals over the CTA.
    curr = R::reduce(tid, curr, storage, num_rows, g);

    // Store result in out
    if (tid == 0)
        output[col] = curr;
}

// Row-wise reduction: threadIdx.x selects a row, threadIdx.y strides across
// columns. Per-(row, y) partials are staged in shared memory at index
// threadIdx.x * warps_per_block + threadIdx.y, then the threadIdx.y == 0
// lane combines them and writes output[row].
// NOTE(review): lanes with row >= num_rows write an uninitialized `curr`
// into shared memory; the `row < num_rows` and `i < num_cols` guards on the
// read side keep those slots from being consumed.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    __shared__ T s[NT];

    int warps_per_block = NT / warp_size;
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = threadIdx.y;
    T curr;

    if (row < num_rows && col < num_cols) {
        curr = f(input[row + col*num_rows]);
        col += blockDim.y;

        while (col < num_cols) {
            curr = g(curr, f(input[row + col*num_rows]));
            col += blockDim.y;
        }
    }
    s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
    __syncthreads();

    // Reduce
    if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
        for (int i = 1; i < warps_per_block && i < num_cols; ++i)
            curr = g(curr, s[i + threadIdx.x * warps_per_block]);
        output[row] = curr;
    }
}

// Picks the kernel by axis: axis == true reduces each column (one block per
// column, num_cols outputs); axis == false reduces each row (num_rows
// outputs). Both use 128-thread blocks.
struct ReduceHelper {
    template<typename T, typename Iof, typename Rof>
    static void impl(Iof f, Rof g, const T* input, T* output,
                     int num_rows, int num_cols, bool axis, hipStream_t stream) {
        int grid_size;

        if (axis) {
            grid_size = num_cols;
            hipLaunchKernelGGL(( reduce_rows<128>), dim3(grid_size), dim3(128), 0, stream, f, g, input, output, num_rows, num_cols);
        } else {
            dim3 tpb(warp_size, 128 / warp_size);
            grid_size = (num_cols + warp_size - 1)/warp_size;
            hipLaunchKernelGGL(( reduce_cols<128>), dim3(grid_size), dim3(tpb), 0, stream, f, g, input, output, num_rows, num_cols);
        }
    }
};

// Launches the reduction, blocks on the stream, and maps any HIP error to a
// CTC status code.
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output,
                   int rows, int cols, bool axis, hipStream_t stream) {
    ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
    hipStreamSynchronize(stream);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        return CTC_STATUS_EXECUTION_FAILED;

    return CTC_STATUS_SUCCESS;
}

// Sum of the negated elements along the chosen axis.
ctcStatus_t reduce_negate(const float *input, float *output,
                          int rows, int cols, bool axis, hipStream_t stream) {
    return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(),
                  input, output, rows, cols, axis, stream);
}

// Sum of exp(x) along the chosen axis.
ctcStatus_t reduce_exp(const float *input, float *output,
                       int rows, int cols, bool axis, hipStream_t stream) {
    return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(),
                  input, output, rows, cols, axis, stream);
}

// Maximum element along the chosen axis.
ctcStatus_t reduce_max(const float *input, float *output,
                       int rows, int cols, bool axis, hipStream_t stream) {
    return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
312985c6f68da60241a462568fe3a4bb180f54f6.cu
// Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <cuda_runtime.h>
// #include <cublas_v2.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "caffe/3rdparty/detail/ctc_helper.h"
#include "caffe/3rdparty/ctc.h"

// Number of lanes per warp assumed by the shuffle-based tail of CTAReduce
// (32 on all NVIDIA GPUs).
const int warp_size = 32;

template<int NT, typename T, typename Rop>
struct CTAReduce;

// Block-wide (CTA) reduction of one value per thread with binary operator
// Rop, for a block of exactly NT threads. Shared memory is folded in half
// each pass down to warp_size partials, then a single warp finishes the
// reduction with register shuffles.
template<int NT, typename T, typename Rop>
struct CTAReduce {
    enum { Size = NT, Capacity = NT };
    struct Storage { T shared[Capacity]; };

    // Reduce x across the block; only threads with tid < count contribute.
    // Must be reached by all NT threads (contains __syncthreads()); callers
    // below read the final value from thread 0 only.
    __device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
        T* s = storage.shared;
        s[tid] = x;
        __syncthreads();

        // Fold the data in half with each pass.
#pragma unroll
        for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
            // `tid + offset < count` keeps lanes beyond the valid element
            // count (whose x is uninitialized) out of the result.
            if(tid + offset < count && tid < offset) {
                // Read from the right half and store to the left half.
                x = g(x, s[offset + tid]);
                s[tid] = x;
            }
            __syncthreads();
        }

        // Final warp_size partials: finish within one warp using shuffles,
        // no shared memory or block barrier required. Full mask is correct
        // since all 32 lanes of the warp reach this loop together.
        T shuff;
        for (int offset = warp_size / 2; offset > 0; offset /= 2) {
            shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
            if (tid + offset < count && tid < offset)
                x = g(x, shuff);
        }
        return x;
    }
};

// Column-wise reduction: one block per column of the num_rows x num_cols
// column-major matrix. Each thread strides down the column applying the
// elementwise functor f, partials are combined with g via CTAReduce, and
// thread 0 writes output[col].
// NOTE(review): `curr` stays uninitialized in threads with tid >= num_rows;
// CTAReduce's `count` guard is what keeps those lanes out of the result.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    typedef CTAReduce<NT, T, Rop> R;
    __shared__ typename R::Storage storage;
    int tid = threadIdx.x;
    int idx = tid;
    int col = blockIdx.x;
    T curr;

    // Each block works on a column
    if (idx < num_rows)
        curr = f(input[idx + col*num_rows]);
    idx += NT;

    while (idx < num_rows) {
        curr = g(curr, f(input[idx + col*num_rows]));
        idx += NT;
    }

    // Sum thread-totals over the CTA.
    curr = R::reduce(tid, curr, storage, num_rows, g);

    // Store result in out
    if (tid == 0)
        output[col] = curr;
}

// Row-wise reduction: threadIdx.x selects a row, threadIdx.y strides across
// columns. Per-(row, y) partials are staged in shared memory at index
// threadIdx.x * warps_per_block + threadIdx.y, then the threadIdx.y == 0
// lane combines them and writes output[row].
// NOTE(review): lanes with row >= num_rows write an uninitialized `curr`
// into shared memory; the `row < num_rows` and `i < num_cols` guards on the
// read side keep those slots from being consumed.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    __shared__ T s[NT];

    int warps_per_block = NT / warp_size;
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = threadIdx.y;
    T curr;

    if (row < num_rows && col < num_cols) {
        curr = f(input[row + col*num_rows]);
        col += blockDim.y;

        while (col < num_cols) {
            curr = g(curr, f(input[row + col*num_rows]));
            col += blockDim.y;
        }
    }
    s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
    __syncthreads();

    // Reduce
    if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
        for (int i = 1; i < warps_per_block && i < num_cols; ++i)
            curr = g(curr, s[i + threadIdx.x * warps_per_block]);
        output[row] = curr;
    }
}

// Picks the kernel by axis: axis == true reduces each column (one block per
// column, num_cols outputs); axis == false reduces each row (num_rows
// outputs). Both use 128-thread blocks.
struct ReduceHelper {
    template<typename T, typename Iof, typename Rof>
    static void impl(Iof f, Rof g, const T* input, T* output,
                     int num_rows, int num_cols, bool axis, cudaStream_t stream) {
        int grid_size;

        if (axis) {
            grid_size = num_cols;
            reduce_rows<128><<<grid_size, 128, 0, stream>>>
                (f, g, input, output, num_rows, num_cols);
        } else {
            dim3 tpb(warp_size, 128 / warp_size);
            grid_size = (num_cols + warp_size - 1)/warp_size;
            reduce_cols<128><<<grid_size, tpb, 0, stream>>>
                (f, g, input, output, num_rows, num_cols);
        }
    }
};

// Launches the reduction, blocks on the stream, and maps any CUDA error to a
// CTC status code.
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output,
                   int rows, int cols, bool axis, cudaStream_t stream) {
    ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
    cudaStreamSynchronize(stream);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        return CTC_STATUS_EXECUTION_FAILED;

    return CTC_STATUS_SUCCESS;
}

// Sum of the negated elements along the chosen axis.
ctcStatus_t reduce_negate(const float *input, float *output,
                          int rows, int cols, bool axis, cudaStream_t stream) {
    return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(),
                  input, output, rows, cols, axis, stream);
}

// Sum of exp(x) along the chosen axis.
ctcStatus_t reduce_exp(const float *input, float *output,
                       int rows, int cols, bool axis, cudaStream_t stream) {
    return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(),
                  input, output, rows, cols, axis, stream);
}

// Maximum element along the chosen axis.
ctcStatus_t reduce_max(const float *input, float *output,
                       int rows, int cols, bool axis, cudaStream_t stream) {
    return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
tiffNormalization.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "../include/book.h"
#include "../include/commCuda.h"
#include "../../include/tiffImageIO.h"

/**
 * @Device
 * Active factor GeoTiff pixel normalization.
 * Grid-stride loop: each thread normalizes pixels tid, tid+stride, ... into
 * [0, 1] via (x - min) / (max - min), skipping the nodata sentinel.
 * NOTE(review): the sentinel test compares the float pixel against the
 * integer constant 0xE0000000 converted to float -- exact float equality on
 * a sentinel value; confirm this matches how nodata pixels are written.
 */
__global__ void normalizeActiveRasterPixel( float *pixelMatrix, int *nPixels, double *rasterMinMax )
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < *nPixels)
    {
        if ( pixelMatrix[tid] - 0xE0000000 != 0 ) // float nullPixelValue = 0xE0000000;
        {
            pixelMatrix[tid] = (pixelMatrix[tid] - rasterMinMax[0]) / (rasterMinMax[1] - rasterMinMax[0]);
        }
        tid += blockDim.x * gridDim.x;
    }
}

/**
 * @Device
 * Negative factor GeoTiff pixel normalization.
 * Grid-stride loop: inverted normalization (max - x) / (max - min), so larger
 * raw values map toward 0.
 * NOTE(review): unlike the active kernel, there is no nodata-sentinel check
 * here -- confirm that is intentional for negative factors.
 */
__global__ void normalizeNegativeRasterPixel( float *pixelMatrix, int *nPixels, double *rasterMinMax )
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < *nPixels)
    {
        pixelMatrix[tid] = (rasterMinMax[1] - pixelMatrix[tid]) / (rasterMinMax[1] - rasterMinMax[0]);
        tid += blockDim.x * gridDim.x;
    }
}

/**
 * <Core Function>
 * Factor GeoTiff pixel normalization.
 * Copies the pixel array to the device, launches the kernel matching the
 * factor type on a fixed 128x128 (blocks x threads) configuration, times the
 * kernel with HIP events, and copies the normalized pixels back in place.
 * @param pixelMatrix -> raster pixel value array (modified in place)
 * @param tiffWidth -> raster width
 * @param tiffHeigth -> raster length
 * @param rasterMinMax -> min & max value in raster pixels
 * @param factorType -> evaluation factor type (Active/Negative)
 */
void rasterPixelNormalization(float *pixelMatrix, int tiffWidth, int tiffHeigth, const double rasterMinMax[2], envFactorType factorType)
{
    int nPixels = tiffWidth * tiffHeigth;

    float *dev_pixelMatrix = NULL;
    int *dev_nPixels = NULL;
    double *dev_rasterMinMax = NULL;

    // Device buffers for the pixels, the pixel count, and the [min, max] pair.
    HANDLE_ERROR( hipMalloc( (void**)&dev_pixelMatrix, nPixels * sizeof(float) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_nPixels, sizeof(int) ) );
    HANDLE_ERROR( hipMalloc( (void**)&dev_rasterMinMax, 2 * sizeof(double) ) );

    HANDLE_ERROR( hipMemcpy( dev_pixelMatrix, pixelMatrix, nPixels * sizeof(float), hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy( dev_nPixels, &nPixels, sizeof(int), hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy( dev_rasterMinMax, rasterMinMax, 2 * sizeof(double), hipMemcpyHostToDevice ) );

    /******************** Preparation for CUDA execution time recording ********************/
    hipEvent_t timeStartEvent, timeEndEvent;
    HANDLE_ERROR( hipEventCreate( &timeStartEvent, 0 ) );
    HANDLE_ERROR( hipEventCreate( &timeEndEvent, 0 ) );
    HANDLE_ERROR( hipEventRecord( timeStartEvent, 0 ) );
    /******************** ******************************************** ********************/

    if ( factorType == factor_Active )
    {
        hipLaunchKernelGGL(( normalizeActiveRasterPixel), dim3(128), dim3(128), 0, 0, dev_pixelMatrix, dev_nPixels, dev_rasterMinMax );
    }
    else
    {
        hipLaunchKernelGGL(( normalizeNegativeRasterPixel), dim3(128), dim3(128), 0, 0, dev_pixelMatrix, dev_nPixels, dev_rasterMinMax );
    }

    /********************** Check out CUDA execution time recording ***********************/
    // Synchronizing on the end event also waits for the kernel to finish,
    // so the device-to-host copy below reads the final values.
    HANDLE_ERROR( hipEventRecord( timeEndEvent, 0 ) );
    HANDLE_ERROR( hipEventSynchronize(timeEndEvent) );
    float elapsedTime = 0;
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, timeStartEvent, timeEndEvent ) );
    printf( "Time Consumption: %f ms. \n", elapsedTime );
    HANDLE_ERROR( hipEventDestroy( timeStartEvent ) );
    HANDLE_ERROR( hipEventDestroy( timeEndEvent ) );
    /******************** ******************************************** ********************/

    HANDLE_ERROR( hipMemcpy( pixelMatrix, dev_pixelMatrix, nPixels * sizeof(float), hipMemcpyDeviceToHost ) );

    HANDLE_ERROR( hipFree( dev_pixelMatrix ) );
    HANDLE_ERROR( hipFree( dev_nPixels ) );
    HANDLE_ERROR( hipFree( dev_rasterMinMax ) );
}

/**
 * <Interface>
 * Factor GeoTiff pixel normalization.
 * Reads the source raster, normalizes its pixels on the GPU, writes the
 * result next to the source metadata, and stamps the output min/max as [0, 1].
 * @param srcTifFile -> Source GeoTiff file path
 * @param outputTifFile -> Result output file path
 * @param factorType -> Evaluation factor type (Active/Negative)
 * NOTE(review): rasterPixels is allocated by readTiffImageToMatrix and never
 * freed here -- confirm ownership, this looks like a leak per call.
 */
void geoTiffRasterPixelNormalization( const char srcTifFile[], const char outputTifFile[], envFactorType factorType )
{
    float *rasterPixels = NULL;
    int tifWidth, tifLength;
    double *rasterMinMax;

    rasterMinMax = (double*)malloc( sizeof(double) * 2 );
    if ( rasterMinMax == NULL )
    {
        ERROR_INFO( "Out of memory" );
        return;
    }

    readTiffImageToMatrix( srcTifFile, 1, &rasterPixels );
    getTiffWidthLength( srcTifFile, 1, &tifWidth, &tifLength );
    getTiffMinMax( srcTifFile, 1, rasterMinMax, 1 );

    rasterPixelNormalization( rasterPixels, tifWidth, tifLength, rasterMinMax, factorType );

    writeTiffImageRefSrc( outputTifFile, srcTifFile, 1, rasterPixels );
    // After normalization the raster's value range is [0, 1] by construction.
    double normalizedRasterMinMax[] = { 0, 1 };
    alterRasterMinMax( outputTifFile, 1, normalizedRasterMinMax );

    free(rasterMinMax);
}
tiffNormalization.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "../include/book.h"
#include "../include/commCuda.h"
#include "../../include/tiffImageIO.h"

/**
 * @Device
 * Active factor GeoTiff pixel normalization.
 * Grid-stride kernel: maps each non-null pixel into [0, 1] via
 * (x - min) / (max - min); pixels equal to the nodata sentinel are skipped.
 */
__global__ void normalizeActiveRasterPixel( float *pixelMatrix, int *nPixels, double *rasterMinMax )
{
    const int stride = blockDim.x * gridDim.x;
    const double lo = rasterMinMax[0];
    const double hi = rasterMinMax[1];

    for ( int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < *nPixels; idx += stride )
    {
        // float nullPixelValue = 0xE0000000; leave nodata pixels untouched
        if ( pixelMatrix[idx] - 0xE0000000 != 0 )
        {
            pixelMatrix[idx] = (pixelMatrix[idx] - lo) / (hi - lo);
        }
    }
}

/**
 * @Device
 * Negative factor GeoTiff pixel normalization.
 * Grid-stride kernel: inverted mapping (max - x) / (max - min), so larger
 * raw values come out closer to 0. No nodata check, matching the original.
 */
__global__ void normalizeNegativeRasterPixel( float *pixelMatrix, int *nPixels, double *rasterMinMax )
{
    const int stride = blockDim.x * gridDim.x;
    const double lo = rasterMinMax[0];
    const double hi = rasterMinMax[1];

    for ( int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < *nPixels; idx += stride )
    {
        pixelMatrix[idx] = (hi - pixelMatrix[idx]) / (hi - lo);
    }
}

/**
 * <Core Function>
 * Factor GeoTiff pixel normalization.
 * Stages the pixel array on the device, runs the kernel that matches the
 * factor type on a fixed <<<128, 128>>> launch, reports the kernel time via
 * CUDA events, and copies the normalized pixels back in place.
 * @param pixelMatrix -> raster pixel value array (modified in place)
 * @param tiffWidth -> raster width
 * @param tiffHeigth -> raster length
 * @param rasterMinMax -> min & max value in raster pixels
 * @param factorType -> evaluation factor type (Active/Negative)
 */
void rasterPixelNormalization(float *pixelMatrix, int tiffWidth, int tiffHeigth, const double rasterMinMax[2], envFactorType factorType)
{
    int nPixels = tiffWidth * tiffHeigth;

    // Device-side staging buffers.
    float *d_pixels = NULL;
    int *d_count = NULL;
    double *d_minMax = NULL;

    HANDLE_ERROR( cudaMalloc( (void**)&d_pixels, nPixels * sizeof(float) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&d_count, sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&d_minMax, 2 * sizeof(double) ) );

    HANDLE_ERROR( cudaMemcpy( d_pixels, pixelMatrix, nPixels * sizeof(float), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( d_count, &nPixels, sizeof(int), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( d_minMax, rasterMinMax, 2 * sizeof(double), cudaMemcpyHostToDevice ) );

    // Bracket the kernel with events so we can report its execution time.
    cudaEvent_t evStart, evStop;
    HANDLE_ERROR( cudaEventCreate( &evStart, 0 ) );
    HANDLE_ERROR( cudaEventCreate( &evStop, 0 ) );
    HANDLE_ERROR( cudaEventRecord( evStart, 0 ) );

    if ( factorType == factor_Active )
    {
        normalizeActiveRasterPixel<<<128, 128>>>( d_pixels, d_count, d_minMax );
    }
    else
    {
        normalizeNegativeRasterPixel<<<128, 128>>>( d_pixels, d_count, d_minMax );
    }

    // Waiting on the stop event also waits for the kernel itself, so the
    // copy back below observes the finished result.
    HANDLE_ERROR( cudaEventRecord( evStop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize(evStop) );

    float elapsedTime = 0;
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, evStart, evStop ) );
    printf( "Time Consumption: %f ms. \n", elapsedTime );

    HANDLE_ERROR( cudaEventDestroy( evStart ) );
    HANDLE_ERROR( cudaEventDestroy( evStop ) );

    HANDLE_ERROR( cudaMemcpy( pixelMatrix, d_pixels, nPixels * sizeof(float), cudaMemcpyDeviceToHost ) );

    HANDLE_ERROR( cudaFree( d_pixels ) );
    HANDLE_ERROR( cudaFree( d_count ) );
    HANDLE_ERROR( cudaFree( d_minMax ) );
}

/**
 * <Interface>
 * Factor GeoTiff pixel normalization.
 * Loads the source raster and its metadata, normalizes the pixels on the
 * GPU, writes the result referencing the source file, and records the
 * output min/max as [0, 1].
 * @param srcTifFile -> Source GeoTiff file path
 * @param outputTifFile -> Result output file path
 * @param factorType -> Evaluation factor type (Active/Negative)
 */
void geoTiffRasterPixelNormalization( const char srcTifFile[], const char outputTifFile[], envFactorType factorType )
{
    float *rasterPixels = NULL;
    int tifWidth, tifLength;

    double *rasterMinMax = (double*)malloc( sizeof(double) * 2 );
    if ( rasterMinMax == NULL )
    {
        ERROR_INFO( "Out of memory" );
        return;
    }

    readTiffImageToMatrix( srcTifFile, 1, &rasterPixels );
    getTiffWidthLength( srcTifFile, 1, &tifWidth, &tifLength );
    getTiffMinMax( srcTifFile, 1, rasterMinMax, 1 );

    rasterPixelNormalization( rasterPixels, tifWidth, tifLength, rasterMinMax, factorType );

    writeTiffImageRefSrc( outputTifFile, srcTifFile, 1, rasterPixels );

    // The normalized raster's range is [0, 1] by construction.
    double normalizedRasterMinMax[] = { 0, 1 };
    alterRasterMinMax( outputTifFile, 1, normalizedRasterMinMax );

    free(rasterMinMax);
}