text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>

#include "bondsStructs.h"
#include "bondsKernelsGpu.cu"
#include "bondsKernelsCpu.cu"

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))

// Abort-on-error wrapper for CUDA runtime calls. The original code ignored
// every cudaMalloc/cudaMemcpy/launch result, so an earlier failure would
// silently corrupt all later results.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Number of days in `month` (1-based: 1 = January .. 12 = December).
int monthLengthCpu(int month, bool leapYear)
{
    static const int MonthLength[]     = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
    static const int MonthLeapLength[] = {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
    return leapYear ? MonthLeapLength[month - 1] : MonthLength[month - 1];
}

// Day-of-year offset of the first day of month `m` (1-based). Index 13
// (m == 13) brackets the year, for use when searching for a day's month.
int monthOffsetCpu(int m, bool leapYear)
{
    static const int MonthOffset[] = {
        0,  31,  59,  90, 120, 151,   // Jan - Jun
      181, 212, 243, 273, 304, 334,   // Jul - Dec
      365                             // used in dayOfMonth to bracket day
    };
    static const int MonthLeapOffset[] = {
        0,  31,  60,  91, 121, 152,   // Jan - Jun
      182, 213, 244, 274, 305, 335,   // Jul - Dec
      366                             // used in dayOfMonth to bracket day
    };
    return leapYear ? MonthLeapOffset[m - 1] : MonthOffset[m - 1];
}

// Leap-year test matching Excel's serial-date convention: 1900 is
// (incorrectly) treated as a leap year, in agreement with Excel's
// well-known bug; every other year follows the Gregorian rule.
// Replaces the original 301-entry lookup table and is therefore valid
// for any year >= 1900, not just the tabulated range 1900-2200.
bool isLeapCpu(int y)
{
    if (y == 1900)
        return true; // Excel's leap-year bug; 1900 is out of valid range anyway
    return (y % 4 == 0) && (y % 100 != 0 || y % 400 == 0);
}

// Serial-day offset of December 31 of the preceding year, relative to the
// 1900 epoch. E.g. yearOffsetCpu(1901) == 366, i.e. December 31 1900,
// because the Excel calendar counts 1900 as a leap year.
// Closed form replaces the original 301-entry table; verified to match it
// across 1900-2200 (1901->366, 1920->7305, 2000->36525, 2100->73050,
// 2200->109574) and extends naturally past 2200.
int yearOffsetCpu(int y)
{
    // Gregorian leap years in [1, y): floor((y-1)/4) - floor((y-1)/100) + floor((y-1)/400)
    int gregLeaps = (y - 1) / 4 - (y - 1) / 100 + (y - 1) / 400;
    const int gregLeapsBefore1900 = 1899 / 4 - 1899 / 100 + 1899 / 400; // == 460
    // +1 when y > 1900: the Excel convention counts 1900 itself as leap.
    int leaps = gregLeaps - gregLeapsBefore1900 + (y > 1900 ? 1 : 0);
    return 365 * (y - 1900) + leaps;
}

// Build a bondsDateStruct from day/month/year and compute its serial
// number relative to the 1900 epoch. (Name keeps the original's
// "intialize" spelling so external callers are unaffected.)
bondsDateStruct intializeDateCpu(int d, int m, int y)
{
    bondsDateStruct currDate;
    currDate.day   = d;
    currDate.month = m;
    currDate.year  = y;

    bool leap  = isLeapCpu(y);
    int offset = monthOffsetCpu(m, leap);
    currDate.dateSerialNum = d + offset + yearOffsetCpu(y);
    return currDate;
}

// Generate a batch of random bonds, price them on the GPU and on the CPU,
// and report the two timings plus spot-check outputs for comparison.
void runBoundsEngine()
{
    // Can run multiple times with different numbers of bonds by extending
    // this array and the loop bound below.
    int nBondsArray[] = {1000000};

    for (int numTime = 0; numTime < 1; numTime++)
    {
        int numBonds = nBondsArray[numTime];
        printf("\nNumber of Bonds: %d\n\n", numBonds);

        // ---- host-side input buffers ------------------------------------
        inArgsStruct inArgsHost;
        inArgsHost.discountCurve  = (bondsYieldTermStruct*)malloc(numBonds * sizeof(bondsYieldTermStruct));
        inArgsHost.repoCurve      = (bondsYieldTermStruct*)malloc(numBonds * sizeof(bondsYieldTermStruct));
        inArgsHost.currDate       = (bondsDateStruct*)malloc(numBonds * sizeof(bondsDateStruct));
        inArgsHost.maturityDate   = (bondsDateStruct*)malloc(numBonds * sizeof(bondsDateStruct));
        inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds * sizeof(dataType));
        inArgsHost.bond           = (bondStruct*)malloc(numBonds * sizeof(bondStruct));
        inArgsHost.dummyStrike    = (dataType*)malloc(numBonds * sizeof(dataType));

        // Fixed seed so runs are reproducible. The rand() call order below
        // matches the original code, so generated inputs are unchanged.
        srand(123);

        int numBond;
        for (numBond = 0; numBond < numBonds; numBond++)
        {
            dataType repoRate        = 0.07;
            int      repoCompounding = SIMPLE_INTEREST;
            dataType repoCompoundFreq = 1;

            // Assume roughly a one-year bond maturing just after "today".
            bondsDateStruct bondIssueDate =
                intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2));
            bondsDateStruct bondMaturityDate =
                intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2));
            bondsDateStruct todaysDate =
                intializeDateCpu(bondMaturityDate.day - 1, bondMaturityDate.month, bondMaturityDate.year);

            bondStruct bond;
            bond.startDate    = bondIssueDate;
            bond.maturityDate = bondMaturityDate;
            bond.rate         = 0.08 + ((float)rand() / (float)RAND_MAX - 0.5) * 0.1;

            dataType bondCouponFrequency = 2;
            dataType bondCleanPrice      = 89.97693786;

            // NOTE: the original assigned refDate/calDate/compounding/frequency
            // twice; the duplicates are removed here.
            bondsYieldTermStruct bondCurve;
            bondCurve.refDate     = todaysDate;
            bondCurve.calDate     = todaysDate;
            bondCurve.forward     = -0.1f; // dummy rate
            bondCurve.compounding = COMPOUNDED_INTEREST;
            bondCurve.frequency   = bondCouponFrequency;
            bondCurve.dayCounter  = USE_EXACT_DAY;

            dataType dummyStrike = 91.5745;

            bondsYieldTermStruct repoCurve;
            repoCurve.refDate     = todaysDate;
            repoCurve.calDate     = todaysDate;
            repoCurve.forward     = repoRate;
            repoCurve.compounding = repoCompounding;
            repoCurve.frequency   = repoCompoundFreq;
            repoCurve.dayCounter  = USE_SERIAL_NUMS;

            inArgsHost.discountCurve[numBond]  = bondCurve;
            inArgsHost.repoCurve[numBond]      = repoCurve;
            inArgsHost.currDate[numBond]       = todaysDate;
            inArgsHost.maturityDate[numBond]   = bondMaturityDate;
            inArgsHost.bondCleanPrice[numBond] = bondCleanPrice;
            inArgsHost.bond[numBond]           = bond;
            inArgsHost.dummyStrike[numBond]    = dummyStrike;
        }

        printf("Inputs for bond with index %d\n", numBonds / 2);
        printf("Bond Issue Date: %d-%d-%d\n",
               inArgsHost.bond[numBonds / 2].startDate.month,
               inArgsHost.bond[numBonds / 2].startDate.day,
               inArgsHost.bond[numBonds / 2].startDate.year);
        printf("Bond Maturity Date: %d-%d-%d\n",
               inArgsHost.bond[numBonds / 2].maturityDate.month,
               inArgsHost.bond[numBonds / 2].maturityDate.day,
               inArgsHost.bond[numBonds / 2].maturityDate.year);
        printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds / 2].rate);

        // ---- host-side output buffers -----------------------------------
        resultsStruct resultsHost;
        resultsStruct resultsFromGpu;
        resultsHost.dirtyPrice               = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsHost.accruedAmountCurrDate    = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsHost.cleanPrice               = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsHost.bondForwardVal           = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsFromGpu.dirtyPrice            = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsFromGpu.cleanPrice            = (dataType*)malloc(numBonds * sizeof(dataType));
        resultsFromGpu.bondForwardVal        = (dataType*)malloc(numBonds * sizeof(dataType));

        // ---- device buffers ---------------------------------------------
        bondsYieldTermStruct* discountCurveGpu;
        bondsYieldTermStruct* repoCurveGpu;
        bondsDateStruct*      currDateGpu;
        bondsDateStruct*      maturityDateGpu;
        dataType*             bondCleanPriceGpu;
        bondStruct*           bondGpu;
        dataType*             dummyStrikeGpu;
        dataType*             dirtyPriceGpu;
        dataType*             accruedAmountCurrDateGpu;
        dataType*             cleanPriceGpu;
        dataType*             bondForwardValGpu;

        CUDA_CHECK(cudaMalloc((void**)&discountCurveGpu, numBonds * sizeof(bondsYieldTermStruct)));
        CUDA_CHECK(cudaMalloc((void**)&repoCurveGpu, numBonds * sizeof(bondsYieldTermStruct)));
        CUDA_CHECK(cudaMalloc((void**)&currDateGpu, numBonds * sizeof(bondsDateStruct)));
        CUDA_CHECK(cudaMalloc((void**)&maturityDateGpu, numBonds * sizeof(bondsDateStruct)));
        CUDA_CHECK(cudaMalloc((void**)&bondCleanPriceGpu, numBonds * sizeof(dataType)));
        CUDA_CHECK(cudaMalloc((void**)&bondGpu, numBonds * sizeof(bondStruct)));
        CUDA_CHECK(cudaMalloc((void**)&dummyStrikeGpu, numBonds * sizeof(dataType)));
        CUDA_CHECK(cudaMalloc((void**)&dirtyPriceGpu, numBonds * sizeof(dataType)));
        CUDA_CHECK(cudaMalloc((void**)&accruedAmountCurrDateGpu, numBonds * sizeof(dataType)));
        CUDA_CHECK(cudaMalloc((void**)&cleanPriceGpu, numBonds * sizeof(dataType)));
        CUDA_CHECK(cudaMalloc((void**)&bondForwardValGpu, numBonds * sizeof(dataType)));

        CUDA_CHECK(cudaMemcpy(discountCurveGpu, inArgsHost.discountCurve,
                              numBonds * sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(repoCurveGpu, inArgsHost.repoCurve,
                              numBonds * sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(currDateGpu, inArgsHost.currDate,
                              numBonds * sizeof(bondsDateStruct), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(maturityDateGpu, inArgsHost.maturityDate,
                              numBonds * sizeof(bondsDateStruct), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(bondCleanPriceGpu, inArgsHost.bondCleanPrice,
                              numBonds * sizeof(dataType), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(bondGpu, inArgsHost.bond,
                              numBonds * sizeof(bondStruct), cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(dummyStrikeGpu, inArgsHost.dummyStrike,
                              numBonds * sizeof(dataType), cudaMemcpyHostToDevice));

        long  seconds, useconds;
        float mtimeCpu;
        float mtimeGpu;
        struct timeval start;
        struct timeval end;

        inArgsStruct inArgs;
        inArgs.discountCurve  = discountCurveGpu;
        inArgs.repoCurve      = repoCurveGpu;
        inArgs.currDate       = currDateGpu;
        inArgs.maturityDate   = maturityDateGpu;
        inArgs.bondCleanPrice = bondCleanPriceGpu;
        inArgs.bond           = bondGpu;
        inArgs.dummyStrike    = dummyStrikeGpu;

        resultsStruct results;
        results.dirtyPrice            = dirtyPriceGpu;
        results.accruedAmountCurrDate = accruedAmountCurrDateGpu;
        results.cleanPrice            = cleanPriceGpu;
        results.bondForwardVal        = bondForwardValGpu;

        // One thread per bond, 256 threads per block; integer ceil-div
        // replaces the original float-based ceil() (avoids precision issues
        // for very large bond counts).
        dim3 threads(256, 1, 1);
        dim3 grid((numBonds + threads.x - 1) / threads.x, 1, 1);

        gettimeofday(&start, NULL);
        getBondsResultsGpu<<<grid, threads>>>(inArgs, results, numBonds);
        CUDA_CHECK(cudaGetLastError());       // catch launch-configuration errors
        CUDA_CHECK(cudaDeviceSynchronize());  // wait so the timing is meaningful
        gettimeofday(&end, NULL);

        CUDA_CHECK(cudaMemcpy(resultsFromGpu.dirtyPrice, dirtyPriceGpu,
                              numBonds * sizeof(dataType), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(resultsFromGpu.accruedAmountCurrDate, accruedAmountCurrDateGpu,
                              numBonds * sizeof(dataType), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(resultsFromGpu.cleanPrice, cleanPriceGpu,
                              numBonds * sizeof(dataType), cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(resultsFromGpu.bondForwardVal, bondForwardValGpu,
                              numBonds * sizeof(dataType), cudaMemcpyDeviceToHost));

        // Elapsed wall time in milliseconds. The original added a spurious
        // +0.5f ms rounding bias; removed.
        seconds  = end.tv_sec - start.tv_sec;
        useconds = end.tv_usec - start.tv_usec;
        mtimeGpu = seconds * 1000 + ((float)useconds) / 1000.0f;

        printf("Run on GPU\n");
        printf("Processing time on GPU: %f (ms) \n\n", mtimeGpu);

        double totPrice = 0.0;
        int    numBond1;
        for (numBond1 = 0; numBond1 < numBonds; numBond1++)
        {
            totPrice += resultsFromGpu.dirtyPrice[numBond1];
        }
        printf("Sum of output dirty prices on GPU: %f\n", totPrice);
        printf("Outputs on GPU for bond with index %d: \n", numBonds / 2);
        printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds / 2]);
        printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds / 2]);
        printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds / 2]);
        printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds / 2]);

        // ---- CPU reference run ------------------------------------------
        gettimeofday(&start, NULL);
        getBondsResultsCpu(inArgsHost, resultsHost, numBonds);
        gettimeofday(&end, NULL);

        seconds  = end.tv_sec - start.tv_sec;
        useconds = end.tv_usec - start.tv_usec;
        mtimeCpu = seconds * 1000 + ((float)useconds) / 1000.0f;

        printf("Run on CPU\n");
        printf("Processing time on CPU: %f (ms) \n\n", mtimeCpu);

        totPrice = 0.0;
        for (numBond1 = 0; numBond1 < numBonds; numBond1++)
        {
            totPrice += resultsHost.dirtyPrice[numBond1];
        }
        printf("Sum of output dirty prices on CPU: %f\n", totPrice);
        printf("Outputs on CPU for bond with index %d: \n", numBonds / 2);
        printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds / 2]);
        printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds / 2]);
        printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds / 2]);
        printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds / 2]);

        printf("Speedup using GPU: %f\n", mtimeCpu / mtimeGpu);

        // ---- cleanup ----------------------------------------------------
        CUDA_CHECK(cudaFree(discountCurveGpu));
        CUDA_CHECK(cudaFree(repoCurveGpu));
        CUDA_CHECK(cudaFree(currDateGpu));
        CUDA_CHECK(cudaFree(maturityDateGpu));
        CUDA_CHECK(cudaFree(bondCleanPriceGpu));
        CUDA_CHECK(cudaFree(bondGpu));
        CUDA_CHECK(cudaFree(dummyStrikeGpu));
        CUDA_CHECK(cudaFree(dirtyPriceGpu));
        CUDA_CHECK(cudaFree(accruedAmountCurrDateGpu));
        CUDA_CHECK(cudaFree(cleanPriceGpu));
        CUDA_CHECK(cudaFree(bondForwardValGpu));

        free(resultsHost.dirtyPrice);
        free(resultsHost.accruedAmountCurrDate);
        free(resultsHost.cleanPrice);
        free(resultsHost.bondForwardVal);
        free(resultsFromGpu.dirtyPrice);
        free(resultsFromGpu.accruedAmountCurrDate);
        free(resultsFromGpu.cleanPrice);
        free(resultsFromGpu.bondForwardVal);

        free(inArgsHost.discountCurve);
        free(inArgsHost.repoCurve);
        free(inArgsHost.currDate);
        free(inArgsHost.maturityDate);
        free(inArgsHost.bondCleanPrice);
        free(inArgsHost.bond);
        free(inArgsHost.dummyStrike);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    runBoundsEngine();
    return 0;
}
the_stack
#include <algorithm> #include <fstream> #include <memory> #include <string> #include <future> #include <condition_variable> #include <mutex> #include <thread> #include <queue> #include <functional> #if defined(_WIN32) # include <Windows.h> # include <wingdi.h> # include <Shlwapi.h> # pragma comment(lib, "shlwapi.lib") # undef min # undef max #else # include <dirent.h> # include <sys/types.h> # include <sys/stat.h> # include <unistd.h> # include <stdarg.h> #endif namespace SimpleYolo{ using namespace nvinfer1; using namespace std; using namespace cv; #define CURRENT_DEVICE_ID -1 #define GPU_BLOCK_THREADS 512 #define KernelPositionBlock \ int position = (blockDim.x * blockIdx.x + threadIdx.x); \ if (position >= (edge)) return; #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__) static bool check_runtime(cudaError_t e, const char* call, int line, const char *file); #define checkCudaKernel(...) \ __VA_ARGS__; \ do{cudaError_t cudaStatus = cudaPeekAtLastError(); \ if (cudaStatus != cudaSuccess){ \ INFOE("launch failed: %s", cudaGetErrorString(cudaStatus)); \ }} while(0); #define Assert(op) \ do{ \ bool cond = !(!(op)); \ if(!cond){ \ INFOF("Assert failed, " #op); \ } \ }while(false) /* 修改这个level来实现修改日志输出级别 */ #define CURRENT_LOG_LEVEL LogLevel::Info #define INFOD(...) __log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__) #define INFOV(...) __log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__) #define INFO(...) __log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__) #define INFOW(...) __log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__) #define INFOE(...) __log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__) #define INFOF(...) 
__log_func(__FILE__, __LINE__, LogLevel::Fatal, __VA_ARGS__) enum class NormType : int{ None = 0, MeanStd = 1, AlphaBeta = 2 }; enum class ChannelType : int{ None = 0, SwapRB = 1 }; /* 归一化操作,可以支持均值标准差,alpha beta,和swap RB */ struct Norm{ float mean[3]; float std[3]; float alpha, beta; NormType type = NormType::None; ChannelType channel_type = ChannelType::None; // out = (x * alpha - mean) / std static Norm mean_std(const float mean[3], const float std[3], float alpha = 1/255.0f, ChannelType channel_type=ChannelType::None); // out = x * alpha + beta static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None); // None static Norm None(); }; Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){ Norm out; out.type = NormType::MeanStd; out.alpha = alpha; out.channel_type = channel_type; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; out.channel_type = channel_type; return out; } Norm Norm::None(){ return Norm(); } /* 构造时设置当前gpuid,析构时修改为原来的gpuid */ class AutoDevice{ public: AutoDevice(int device_id = 0){ cudaGetDevice(&old_); checkCudaRuntime(cudaSetDevice(device_id)); } virtual ~AutoDevice(){ checkCudaRuntime(cudaSetDevice(old_)); } private: int old_ = -1; }; enum class LogLevel : int{ Debug = 5, Verbose = 4, Info = 3, Warning = 2, Error = 1, Fatal = 0 }; static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...); inline int upbound(int n, int align = 32){return (n + align - 1) / align * align;} static bool check_runtime(cudaError_t e, const char* call, int line, const char *file){ if (e != cudaSuccess) { INFOE("CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d", call, cudaGetErrorString(e), cudaGetErrorName(e), e, file, line); return false; } return 
true; } #define TRT_STR(v) #v #define TRT_VERSION_STRING(major, minor, patch, build) TRT_STR(major) "." TRT_STR(minor) "." TRT_STR(patch) "." TRT_STR(build) const char* trt_version(){ return TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD); } static bool check_device_id(int device_id){ int device_count = -1; checkCudaRuntime(cudaGetDeviceCount(&device_count)); if(device_id < 0 || device_id >= device_count){ INFOE("Invalid device id: %d, count = %d", device_id, device_count); return false; } return true; } static bool exists(const string& path){ #ifdef _WIN32 return ::PathFileExistsA(path.c_str()); #else return access(path.c_str(), R_OK) == 0; #endif } static const char* level_string(LogLevel level){ switch (level){ case LogLevel::Debug: return "debug"; case LogLevel::Verbose: return "verbo"; case LogLevel::Info: return "info"; case LogLevel::Warning: return "warn"; case LogLevel::Error: return "error"; case LogLevel::Fatal: return "fatal"; default: return "unknow"; } } template<typename _T> static string join_dims(const vector<_T>& dims){ stringstream output; char buf[64]; const char* fmts[] = {"%d", " x %d"}; for(int i = 0; i < dims.size(); ++i){ snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]); output << buf; } return output.str(); } static bool save_file(const string& file, const void* data, size_t length){ FILE* f = fopen(file.c_str(), "wb"); if (!f) return false; if (data && length > 0){ if (fwrite(data, 1, length, f) != length){ fclose(f); return false; } } fclose(f); return true; } static bool save_file(const string& file, const vector<uint8_t>& data){ return save_file(file, data.data(), data.size()); } static string file_name(const string& path, bool include_suffix){ if (path.empty()) return ""; int p = path.rfind('/'); #ifdef U_OS_WINDOWS int e = path.rfind('\\'); p = std::max(p, e); #endif p += 1; //include suffix if (include_suffix) return path.substr(p); int u = path.rfind('.'); if (u == -1) return 
path.substr(p); if (u <= p) u = path.size(); return path.substr(p, u - p); } vector<string> glob_image_files(const string& directory){ /* 检索目录下的所有图像:"*.jpg;*.png;*.bmp;*.jpeg;*.tiff" */ vector<string> files, output; set<string> pattern_set{"jpg", "png", "bmp", "jpeg", "tiff"}; if(directory.empty()){ INFOE("Glob images from folder failed, folder is empty"); return output; } try{ vector<cv::String> files_; files_.reserve(10000); cv::glob(directory + "/*", files_, true); files.insert(files.end(), files_.begin(), files_.end()); }catch(...){ INFOE("Glob %s failed", directory.c_str()); return output; } for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; int p = file.rfind("."); if(p == -1) continue; auto suffix = file.substr(p+1); std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){ if(c >= 'A' && c <= 'Z') c -= 'A' + 'a'; return c; }); if(pattern_set.find(suffix) != pattern_set.end()) output.push_back(file); } return output; } static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){ if(level > CURRENT_LOG_LEVEL) return; va_list vl; va_start(vl, fmt); char buffer[2048]; string filename = file_name(file, true); int n = snprintf(buffer, sizeof(buffer), "[%s][%s:%d]:", level_string(level), filename.c_str(), line); vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl); fprintf(stdout, "%s\n", buffer); if (level == LogLevel::Fatal) { fflush(stdout); abort(); } } static dim3 grid_dims(int numJobs) { int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS; return dim3(((numJobs + numBlockThreads - 1) / (float)numBlockThreads)); } static dim3 block_dims(int numJobs) { return numJobs < GPU_BLOCK_THREADS ? 
numJobs : GPU_BLOCK_THREADS; } static int get_device(int device_id){ if(device_id != CURRENT_DEVICE_ID){ check_device_id(device_id); return device_id; } checkCudaRuntime(cudaGetDevice(&device_id)); return device_id; } void set_device(int device_id) { if (device_id == -1) return; checkCudaRuntime(cudaSetDevice(device_id)); } /////////////////////////////CUDA kernels//////////////////////////////////////////////// const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; 
*pout_item++ = label; *pout_item++ = 1; // 1 = keep, 0 = ignore } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; // left, top, right, bottom, confidence, class, keepflag float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; if(pitem[4] >= pcurrent[4]){ if(pitem[4] == pcurrent[4] && i < position) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ pcurrent[6] = 0; // 1=keep, 0=ignore return; } } } } static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, cudaStream_t stream){ auto grid = grid_dims(num_bboxes); auto block = block_dims(num_bboxes); /* 如果核函数有波浪线,没关系,他是正常的,你只是看不顺眼罢了 */ checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects)); grid = grid_dims(max_objects); block = block_dims(max_objects); checkCudaKernel(fast_nms_kernel<<<grid, block, 0, stream>>>(parray, 
max_objects, nms_threshold)); } static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = m_x1 * dx + m_y1 * dy + m_z1; float src_y = m_x2 * dx + m_y2 * dy + m_z2; float c0, c1, c2; if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floorf(src_y); int x_low = floorf(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } // same to opencv c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f); c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f); c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f); } if(norm.channel_type == ChannelType::SwapRB){ float t = c2; c2 = c0; c0 = 
t; } if(norm.type == NormType::MeanStd){ c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0]; c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1]; c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2]; }else if(norm.type == NormType::AlphaBeta){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } static void warp_affine_bilinear_and_normalize_plane( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, cudaStream_t stream) { int jobs = dst_width * dst_height; auto grid = grid_dims(jobs); auto block = block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs )); } //////////////////////////////class MixMemory///////////////////////////////////////////////// /* gpu/cpu内存管理 自动对gpu和cpu内存进行分配和释放 这里的cpu使用的是pinned memory,当对gpu做内存复制时,性能比较好 因为是cudaMallocHost分配的,因此他与cuda context有关联 */ class MixMemory { public: MixMemory(int device_id = CURRENT_DEVICE_ID); MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); virtual ~MixMemory(); void* gpu(size_t size); void* cpu(size_t size); void release_gpu(); void release_cpu(); void release_all(); inline bool owner_gpu() const{return owner_gpu_;} inline bool owner_cpu() const{return owner_cpu_;} inline size_t cpu_size() const{return cpu_size_;} inline size_t gpu_size() const{return gpu_size_;} inline int device_id() const{return device_id_;} inline void* gpu() const { return gpu_; } // Pinned Memory inline void* cpu() const { return cpu_; } void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size); 
private: void* cpu_ = nullptr; size_t cpu_size_ = 0; bool owner_cpu_ = true; int device_id_ = 0; void* gpu_ = nullptr; size_t gpu_size_ = 0; bool owner_gpu_ = true; }; MixMemory::MixMemory(int device_id){ device_id_ = get_device(device_id); } MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ reference_data(cpu, cpu_size, gpu, gpu_size); } void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){ release_all(); if(cpu == nullptr || cpu_size == 0){ cpu = nullptr; cpu_size = 0; } if(gpu == nullptr || gpu_size == 0){ gpu = nullptr; gpu_size = 0; } this->cpu_ = cpu; this->cpu_size_ = cpu_size; this->gpu_ = gpu; this->gpu_size_ = gpu_size; this->owner_cpu_ = !(cpu && cpu_size > 0); this->owner_gpu_ = !(gpu && gpu_size > 0); checkCudaRuntime(cudaGetDevice(&device_id_)); } MixMemory::~MixMemory() { release_all(); } void* MixMemory::gpu(size_t size) { if (gpu_size_ < size) { release_gpu(); gpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaMalloc(&gpu_, size)); checkCudaRuntime(cudaMemset(gpu_, 0, size)); } return gpu_; } void* MixMemory::cpu(size_t size) { if (cpu_size_ < size) { release_cpu(); cpu_size_ = size; AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaMallocHost(&cpu_, size)); Assert(cpu_ != nullptr); memset(cpu_, 0, size); } return cpu_; } void MixMemory::release_cpu() { if (cpu_) { if(owner_cpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaFreeHost(cpu_)); } cpu_ = nullptr; } cpu_size_ = 0; } void MixMemory::release_gpu() { if (gpu_) { if(owner_gpu_){ AutoDevice auto_device_exchange(device_id_); checkCudaRuntime(cudaFree(gpu_)); } gpu_ = nullptr; } gpu_size_ = 0; } void MixMemory::release_all() { release_cpu(); release_gpu(); } /////////////////////////////////class Tensor//////////////////////////////////////////////// /* Tensor类,实现张量的管理 由于NN多用张量,必须有个类进行管理才方便,实现内存自动分配,计算索引等等 如果要调试,可以执行save_to_file,储存为文件后,在python中加载并查看 */ enum class 
DataHead : int{
    Init = 0,        // no data resident yet
    Device = 1,      // freshest copy lives on the GPU
    Host = 2         // freshest copy lives on the CPU
};

// Tensor: shape bookkeeping plus lazily synchronized host/device storage backed
// by a MixMemory. Element type is fixed to float (see element_size()).
class Tensor {
public:
    Tensor(const Tensor& other) = delete;
    Tensor& operator = (const Tensor& other) = delete;

    explicit Tensor(std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
    explicit Tensor(int n, int c, int h, int w, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
    explicit Tensor(int ndims, const int* dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
    explicit Tensor(const std::vector<int>& dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
    virtual ~Tensor();

    int numel() const;    // total number of elements (product of all dims)
    inline int ndims() const{return shape_.size();}
    inline int size(int index) const{return shape_[index];}
    inline int shape(int index) const{return shape_[index];}

    // NCHW convenience accessors (meaningful when the tensor has >= 4 dims).
    inline int batch()   const{return shape_[0];}
    inline int channel() const{return shape_[1];}
    inline int height()  const{return shape_[2];}
    inline int width()   const{return shape_[3];}

    inline const std::vector<int>& dims() const { return shape_; }
    inline int bytes() const { return bytes_; }
    inline int bytes(int start_axis) const { return count(start_axis) * element_size(); }
    inline int element_size() const { return sizeof(float); }   // storage is always float
    inline DataHead head() const { return head_; }

    std::shared_ptr<Tensor> clone() const;
    Tensor& release();
    Tensor& set_to(float value);
    bool empty() const;

    // Flat element offset from per-axis indices, e.g. offset(n, c, h, w).
    template<typename ... _Args>
    int offset(int index, _Args ... index_args) const{
        const int index_array[] = {index, index_args...};
        return offset_array(sizeof...(index_args) + 1, index_array);
    }

    int offset_array(const std::vector<int>& index) const;
    int offset_array(size_t size, const int* index_array) const;

    // Variadic resize, e.g. resize(n, c, h, w).
    template<typename ... _Args>
    Tensor& resize(int dim_size, _Args ... dim_size_args){
        const int dim_size_array[] = {dim_size, dim_size_args...};
        return resize(sizeof...(dim_size_args) + 1, dim_size_array);
    }

    Tensor& resize(int ndims, const int* dims);
    Tensor& resize(const std::vector<int>& dims);
    Tensor& resize_single_dim(int idim, int size);
    int count(int start_axis = 0) const;   // product of dims from start_axis onward
    int device() const{return device_id_;}

    Tensor& to_gpu(bool copy=true);
    Tensor& to_cpu(bool copy=true);

    // Raw accessors: migrate the data to the requested side first (lazy sync).
    inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); }
    inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); }

    template<typename DType> inline const DType* cpu() const { return (DType*)cpu(); }
    template<typename DType> inline DType* cpu()             { return (DType*)cpu(); }

    // Typed host pointer at a multi-index, e.g. cpu<float>(n, c).
    template<typename DType, typename ... _Args>
    inline DType* cpu(int i, _Args&& ... args) { return cpu<DType>() + offset(i, args...); }

    template<typename DType> inline const DType* gpu() const { return (DType*)gpu(); }
    template<typename DType> inline DType* gpu()             { return (DType*)gpu(); }

    // Typed device pointer at a multi-index, e.g. gpu<float>(ibatch).
    template<typename DType, typename ... _Args>
    inline DType* gpu(int i, _Args&& ... args) { return gpu<DType>() + offset(i, args...); }

    // Host-side element reference at a multi-index.
    template<typename DType, typename ... _Args>
    inline DType& at(int i, _Args&& ... args) { return *(cpu<DType>() + offset(i, args...)); }

    std::shared_ptr<MixMemory> get_data()      const {return data_;}
    std::shared_ptr<MixMemory> get_workspace() const {return workspace_;}
    Tensor& set_workspace(std::shared_ptr<MixMemory> workspace) {workspace_ = workspace; return *this;}

    cudaStream_t get_stream() const{return stream_;}
    Tensor& set_stream(cudaStream_t stream){stream_ = stream; return *this;}

    Tensor& set_mat     (int n, const cv::Mat& image);
    Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]);

    // View of one channel plane as a CV_32F Mat sharing this tensor's host buffer.
    cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu<float>(n, c)); }

    Tensor& synchronize();   // blocks until stream_ has drained
    const char* shape_string() const{return shape_string_;}
    const char* descriptor() const;

    // Copy num_element elements from device pointer `src` into this tensor at
    // element offset `offset` (src may live on another GPU — see implementation).
    Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID);

    /**
    # The following Python code loads a Tensor saved by save_to_file
    import numpy as np

    def load_tensor(file):
        with open(file, "rb") as f:
            binary_data = f.read()

        magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0)
        assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file."

        dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4)

        if dtype == 0:
            np_dtype = np.float32
        elif dtype == 1:
            np_dtype = np.float16
        else:
            assert False, f"Unsupport dtype = {dtype}, can not convert to numpy dtype"

        return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims)
    **/
    bool save_to_file(const std::string& file) const;

private:
    Tensor& compute_shape_string();
    Tensor& adajust_memory_by_update_dims_or_type();
    void setup_data(std::shared_ptr<MixMemory> data);

private:
    std::vector<int> shape_;                 // logical dimensions
    size_t bytes_ = 0;                       // bytes required by the current shape
    DataHead head_ = DataHead::Init;         // which side holds the freshest data
    cudaStream_t stream_ = nullptr;          // stream used for all async copies
    int device_id_ = 0;
    char shape_string_[100];                 // cached "d0 x d1 x ..." text
    char descriptor_string_[100];
    std::shared_ptr<MixMemory> data_;        // backing storage (host + device)
    std::shared_ptr<MixMemory> workspace_;   // optional scratch area
};

Tensor::Tensor(int n, int c, int h, int w, shared_ptr<MixMemory> data, int device_id) {
    this->device_id_ = get_device(device_id);
    descriptor_string_[0] = 0;
    setup_data(data);
    resize(n, c, h, w);
}

Tensor::Tensor(const std::vector<int>& dims, shared_ptr<MixMemory> data, int device_id){
    this->device_id_ = get_device(device_id);
    descriptor_string_[0] = 0;
    setup_data(data);
    resize(dims);
}

Tensor::Tensor(int ndims, const int* dims, shared_ptr<MixMemory> data, int device_id) {
    this->device_id_ = get_device(device_id);
    descriptor_string_[0] = 0;
    setup_data(data);
    resize(ndims, dims);
}

Tensor::Tensor(shared_ptr<MixMemory> data, int device_id){
    shape_string_[0] = 0;
    descriptor_string_[0] = 0;
    this->device_id_ = get_device(device_id);
    setup_data(data);
}

Tensor::~Tensor() {
    release();
}

// Human-readable summary "Tensor:<data ptr>, <shape>, CUDA:<device>".
const char* Tensor::descriptor() const{

    char* descriptor_ptr = (char*)descriptor_string_;
    int device_id = device();
    snprintf(descriptor_ptr, sizeof(descriptor_string_),
        "Tensor:%p, %s, CUDA:%d",
        data_.get(),
        shape_string_,
        device_id
    );
    return descriptor_ptr;
}

// Rebuild shape_string_ as "d0 x d1 x ... x dn".
Tensor& Tensor::compute_shape_string(){

    // clean string
    shape_string_[0] = 0;

    char* buffer = shape_string_;
    size_t buffer_size = sizeof(shape_string_);
    for(int i = 0; i <
shape_.size(); ++i){

        int size = 0;
        if(i < shape_.size() - 1)
            size = snprintf(buffer, buffer_size, "%d x ", shape_[i]);
        else
            size = snprintf(buffer, buffer_size, "%d", shape_[i]);

        buffer += size;
        buffer_size -= size;
    }
    return *this;
}

// Adopt (or lazily create) the backing MixMemory and derive the initial data
// head from whichever side already holds memory (GPU wins if both exist).
void Tensor::setup_data(shared_ptr<MixMemory> data){

    data_ = data;
    if(data_ == nullptr){
        data_ = make_shared<MixMemory>(device_id_);
    }else{
        device_id_ = data_->device_id();
    }

    head_ = DataHead::Init;
    if(data_->cpu()){
        head_ = DataHead::Host;
    }

    if(data_->gpu()){
        head_ = DataHead::Device;
    }
}

// Copy num_element elements from device memory `src` into this tensor at
// element offset `offset`. Handles same-GPU, peer-GPU, and host-resident
// destinations; out-of-range requests are logged and ignored.
Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){

    if(head_ == DataHead::Init)
        to_gpu(false);

    size_t offset_location = offset * element_size();
    if(offset_location >= bytes_){
        INFOE("Offset location[%lld] >= bytes_[%lld], out of range", offset_location, bytes_);
        return *this;
    }

    size_t copyed_bytes = num_element * element_size();
    size_t remain_bytes = bytes_ - offset_location;
    if(copyed_bytes > remain_bytes){
        INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range", copyed_bytes, remain_bytes);
        return *this;
    }

    if(head_ == DataHead::Device){
        int current_device_id = get_device(device_id);
        int gpu_device_id = device();
        if(current_device_id != gpu_device_id){
            // src lives on a different GPU: use a peer-to-peer copy.
            checkCudaRuntime(cudaMemcpyPeerAsync(gpu<unsigned char>() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_));
            //checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));
        }
        else{
            checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));
        }
    }else if(head_ == DataHead::Host){
        AutoDevice auto_device_exchange(this->device());
        checkCudaRuntime(cudaMemcpyAsync(cpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToHost, stream_));
    }else{
        INFOE("Unsupport head type %d", head_);
    }
    return *this;
}

// Drop all storage and shape information; the tensor becomes empty.
Tensor& Tensor::release() {
    data_->release_all();
    shape_.clear();
    bytes_ = 0;
    head_ = DataHead::Init;
    return *this;
}

bool Tensor::empty() const{
    return data_->cpu() == nullptr && data_->gpu() == nullptr;
}

// Product of dimensions from start_axis to the end (0 for an invalid axis).
int Tensor::count(int start_axis) const {

    if(start_axis >= 0 && start_axis < shape_.size()){

        int size = 1;
        for (int i = start_axis; i < shape_.size(); ++i)
            size *= shape_[i];
        return size;
    }else{
        return 0;
    }
}

Tensor& Tensor::resize(const std::vector<int>& dims) {
    return resize(dims.size(), dims.data());
}

int Tensor::numel() const{
    int value = shape_.empty() ? 0 : 1;
    for(int i = 0; i < shape_.size(); ++i){
        value *= shape_[i];
    }
    return value;
}

// Resize only one dimension, keeping the others as-is.
Tensor& Tensor::resize_single_dim(int idim, int size){

    Assert(idim >= 0 && idim < shape_.size());

    auto new_shape = shape_;
    new_shape[idim] = size;
    return resize(new_shape);
}

// Change the shape. A dim of -1 keeps the corresponding existing dimension
// (only valid when the rank is unchanged). Memory is adjusted lazily.
Tensor& Tensor::resize(int ndims, const int* dims) {

    vector<int> setup_dims(ndims);
    for(int i = 0; i < ndims; ++i){
        int dim = dims[i];
        if(dim == -1){
            Assert(ndims == shape_.size());
            dim = shape_[i];
        }
        setup_dims[i] = dim;
    }
    this->shape_ = setup_dims;
    this->adajust_memory_by_update_dims_or_type();
    this->compute_shape_string();
    return *this;
}

// When the new shape needs more bytes than currently tracked, reset the head to
// Init so the next to_cpu/to_gpu reallocates; bytes_ always reflects the shape.
Tensor& Tensor::adajust_memory_by_update_dims_or_type(){

    int needed_size = this->numel() * element_size();
    if(needed_size > this->bytes_){
        head_ = DataHead::Init;
    }
    this->bytes_ = needed_size;
    return *this;
}

Tensor& Tensor::synchronize(){
    AutoDevice auto_device_exchange(this->device());
    checkCudaRuntime(cudaStreamSynchronize(stream_));
    return *this;
}

// Make the GPU side current; optionally copy host -> device (async on stream_).
Tensor& Tensor::to_gpu(bool copy) {

    if (head_ == DataHead::Device)
        return *this;

    head_ = DataHead::Device;
    data_->gpu(bytes_);

    if (copy && data_->cpu() != nullptr) {
        AutoDevice auto_device_exchange(this->device());
        checkCudaRuntime(cudaMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, cudaMemcpyHostToDevice, stream_));
    }
    return *this;
}

// Make the CPU side current; optionally copy device -> host and WAIT for it,
// so host data is immediately valid on return.
Tensor& Tensor::to_cpu(bool copy) {

    if (head_ == DataHead::Host)
        return *this;

    head_ = DataHead::Host;
    data_->cpu(bytes_);

    if (copy && data_->gpu() != nullptr) {
        AutoDevice auto_device_exchange(this->device());
        checkCudaRuntime(cudaMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, cudaMemcpyDeviceToHost, stream_));
        checkCudaRuntime(cudaStreamSynchronize(stream_));
    }
    return *this;
}

// Row-major flat offset of a (possibly partial) multi-index; missing trailing
// indices count as 0.
int Tensor::offset_array(size_t size, const int* index_array) const{

    Assert(size <= shape_.size());
    int value = 0;
    for(int i = 0; i < shape_.size(); ++i){

        if(i < size)
            value += index_array[i];

        if(i + 1 < shape_.size())
            value *= shape_[i+1];
    }
    return value;
}

int Tensor::offset_array(const std::vector<int>& index_array) const{
    return offset_array(index_array.size(), index_array.data());
}

// Dump the tensor to disk: magic, ndims, dtype(0 = float32), dims, raw host
// data. Readable with the Python loader shown in the class declaration.
bool Tensor::save_to_file(const std::string& file) const{

    if(empty()) return false;

    FILE* f = fopen(file.c_str(), "wb");
    if(f == nullptr) return false;

    int ndims = this->ndims();
    int dtype_ = 0;
    unsigned int head[3] = {0xFCCFE2E2, ndims, static_cast<unsigned int>(dtype_)};
    fwrite(head, 1, sizeof(head), f);
    fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f);
    fwrite(cpu(), 1, bytes_, f);
    fclose(f);
    return true;
}

/////////////////////////////////class TRTInferImpl////////////////////////////////////////////////
// Routes TensorRT log messages into this project's logging macros; internal
// errors abort the process.
class Logger : public ILogger {
public:
    virtual void log(Severity severity, const char* msg) noexcept override {

        if (severity == Severity::kINTERNAL_ERROR) {
            INFOE("NVInfer INTERNAL_ERROR: %s", msg);
            abort();
        }else if (severity == Severity::kERROR) {
            INFOE("NVInfer: %s", msg);
        }
        else if (severity == Severity::kWARNING) {
            INFOW("NVInfer: %s", msg);
        }
        else if (severity == Severity::kINFO) {
            INFOD("NVInfer: %s", msg);
        }
        else {
            INFOD("%s", msg);
        }
    }
};
static Logger gLogger;

// Deleter for TensorRT objects that are released via ->destroy().
template<typename _T>
static void destroy_nvidia_pointer(_T* ptr) {
    if (ptr) ptr->destroy();
}

// Owns the TensorRT runtime/engine/execution-context trio plus the CUDA stream
// used for inference.
class EngineContext {
public:
    virtual ~EngineContext() { destroy(); }

    // Replace the stream: destroys a previously owned one; never owns the new one.
    void set_stream(cudaStream_t stream){

        if(owner_stream_){
            if (stream_) {cudaStreamDestroy(stream_);}
            owner_stream_ = false;
        }
        stream_ = stream;
    }

    // Deserialize an engine from memory and create an execution context.
    bool build_model(const void* pdata, size_t size) {
        destroy();

        if(pdata == nullptr ||
size == 0)
            return false;

        owner_stream_ = true;
        checkCudaRuntime(cudaStreamCreate(&stream_));
        if(stream_ == nullptr)
            return false;

        runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroy_nvidia_pointer<IRuntime>);
        if (runtime_ == nullptr)
            return false;

        engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer<ICudaEngine>);
        if (engine_ == nullptr)
            return false;

        //runtime_->setDLACore(0);
        context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroy_nvidia_pointer<IExecutionContext>);
        return context_ != nullptr;
    }

private:
    // Tear down in reverse creation order, then release the owned stream.
    void destroy() {
        context_.reset();
        engine_.reset();
        runtime_.reset();

        if(owner_stream_){
            if (stream_) {cudaStreamDestroy(stream_);}
        }
        stream_ = nullptr;
    }

public:
    cudaStream_t stream_ = nullptr;
    bool owner_stream_ = false;      // true when this object created stream_
    shared_ptr<IExecutionContext> context_;
    shared_ptr<ICudaEngine> engine_;
    shared_ptr<IRuntime> runtime_ = nullptr;
};

// Wrapper around a deserialized TensorRT engine: owns one Tensor per binding,
// maps binding names to indices, and drives enqueueV2.
class TRTInferImpl{
public:
    virtual ~TRTInferImpl();
    bool load(const std::string& file);
    bool load_from_memory(const void* pdata, size_t size);
    void destroy();
    void forward(bool sync);
    int get_max_batch_size();
    cudaStream_t get_stream();
    void set_stream(cudaStream_t stream);
    void synchronize();
    size_t get_device_memory_size();
    std::shared_ptr<MixMemory> get_workspace();
    std::shared_ptr<Tensor> input(int index = 0);
    std::string get_input_name(int index = 0);
    std::shared_ptr<Tensor> output(int index = 0);
    std::string get_output_name(int index = 0);
    std::shared_ptr<Tensor> tensor(const std::string& name);
    bool is_output_name(const std::string& name);
    bool is_input_name(const std::string& name);
    void set_input (int index, std::shared_ptr<Tensor> tensor);
    void set_output(int index, std::shared_ptr<Tensor> tensor);
    std::shared_ptr<std::vector<uint8_t>> serial_engine();

    void print();

    int num_output();
    int num_input();
    int device();

private:
    void build_engine_input_and_outputs_mapper();

private:
    std::vector<std::shared_ptr<Tensor>> inputs_;
    std::vector<std::shared_ptr<Tensor>> outputs_;
    std::vector<int> inputs_map_to_ordered_index_;     // input index  -> position in orderdBlobs_
    std::vector<int> outputs_map_to_ordered_index_;    // output index -> position in orderdBlobs_
    std::vector<std::string> inputs_name_;
    std::vector<std::string> outputs_name_;
    std::vector<std::shared_ptr<Tensor>> orderdBlobs_;  // all tensors in binding order
    std::map<std::string, int> blobsNameMapper_;        // binding name -> binding index
    std::shared_ptr<EngineContext> context_;
    std::vector<void*> bindingsPtr_;                    // raw device pointers fed to enqueueV2
    std::shared_ptr<MixMemory> workspace_;
    int device_ = 0;
};

////////////////////////////////////////////////////////////////////////////////////
TRTInferImpl::~TRTInferImpl(){
    destroy();
}

// Release everything on the engine's device, restoring the caller's device afterwards.
void TRTInferImpl::destroy() {

    int old_device = 0;
    checkCudaRuntime(cudaGetDevice(&old_device));
    checkCudaRuntime(cudaSetDevice(device_));
    this->context_.reset();
    this->blobsNameMapper_.clear();
    this->outputs_.clear();
    this->inputs_.clear();
    this->inputs_name_.clear();
    this->outputs_name_.clear();
    checkCudaRuntime(cudaSetDevice(old_device));
}

// Log a human-readable summary of the engine's bindings.
void TRTInferImpl::print(){
    if(!context_){
        INFOW("Infer print, nullptr.");
        return;
    }

    INFO("Infer %p detail", this);
    INFO("\tMax Batch Size: %d", this->get_max_batch_size());
    INFO("\tInputs: %d", inputs_.size());
    for(int i = 0; i < inputs_.size(); ++i){
        auto& tensor = inputs_[i];
        auto& name = inputs_name_[i];
        INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string());
    }

    INFO("\tOutputs: %d", outputs_.size());
    for(int i = 0; i < outputs_.size(); ++i){
        auto& tensor = outputs_[i];
        auto& name = outputs_name_[i];
        INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string());
    }
}

// Serialize the engine to a byte vector (copies; the temporary IHostMemory is destroyed).
std::shared_ptr<std::vector<uint8_t>> TRTInferImpl::serial_engine() {
    auto memory = this->context_->engine_->serialize();
    auto output = make_shared<std::vector<uint8_t>>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size());
    memory->destroy();
    return output;
}

bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) {

    if (pdata == nullptr || size == 0)
        return false;

    context_.reset(new EngineContext());

    //build model
    if (!context_->build_model(pdata, size)) {
        context_.reset();
        return false;
    }

    workspace_.reset(new MixMemory());
    cudaGetDevice(&device_);
    build_engine_input_and_outputs_mapper();
    return true;
}

// Read a whole file into memory; returns an empty vector on failure.
static std::vector<uint8_t> load_file(const string& file){

    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};

    in.seekg(0, ios::end);
    size_t length = in.tellg();

    std::vector<uint8_t> data;
    if (length > 0){
        in.seekg(0, ios::beg);
        data.resize(length);

        in.read((char*)&data[0], length);
    }
    in.close();
    return data;
}

bool TRTInferImpl::load(const std::string& file) {

    auto data = load_file(file);
    if (data.empty())
        return false;

    context_.reset(new EngineContext());

    //build model
    if (!context_->build_model(data.data(), data.size())) {
        context_.reset();
        return false;
    }

    workspace_.reset(new MixMemory());
    cudaGetDevice(&device_);
    build_engine_input_and_outputs_mapper();
    return true;
}

size_t TRTInferImpl::get_device_memory_size() {
    EngineContext* context = (EngineContext*)this->context_.get();
    return context->context_->getEngine().getDeviceMemorySize();
}

// Walk the engine bindings, create one Tensor per binding (batch dim forced to
// the engine's max batch size), and record name/index mappings for I/O tensors.
void TRTInferImpl::build_engine_input_and_outputs_mapper() {

    EngineContext* context = (EngineContext*)this->context_.get();
    int nbBindings = context->engine_->getNbBindings();
    int max_batchsize = context->engine_->getMaxBatchSize();

    inputs_.clear();
    inputs_name_.clear();
    outputs_.clear();
    outputs_name_.clear();
    orderdBlobs_.clear();
    bindingsPtr_.clear();
    blobsNameMapper_.clear();
    for (int i = 0; i < nbBindings; ++i) {

        auto dims = context->engine_->getBindingDimensions(i);
        auto type = context->engine_->getBindingDataType(i);
        const char* bindingName = context->engine_->getBindingName(i);
        dims.d[0] = max_batchsize;
        auto newTensor = make_shared<Tensor>(dims.nbDims, dims.d);
        newTensor->set_stream(this->context_->stream_);
        newTensor->set_workspace(this->workspace_);
        if (context->engine_->bindingIsInput(i)) {
            //if is input
            inputs_.push_back(newTensor);
            inputs_name_.push_back(bindingName);
            inputs_map_to_ordered_index_.push_back(orderdBlobs_.size());
        }
        else {
            //if is output
            outputs_.push_back(newTensor);
            outputs_name_.push_back(bindingName);
            outputs_map_to_ordered_index_.push_back(orderdBlobs_.size());
        }
        blobsNameMapper_[bindingName] = i;
        orderdBlobs_.push_back(newTensor);
    }
    bindingsPtr_.resize(orderdBlobs_.size());
}

// Propagate a new stream to the engine context and every binding tensor.
void TRTInferImpl::set_stream(cudaStream_t stream){
    this->context_->set_stream(stream);

    for(auto& t : orderdBlobs_)
        t->set_stream(stream);
}

cudaStream_t TRTInferImpl::get_stream() {
    return this->context_->stream_;
}

int TRTInferImpl::device() {
    return device_;
}

void TRTInferImpl::synchronize() {
    checkCudaRuntime(cudaStreamSynchronize(context_->stream_));
}

bool TRTInferImpl::is_output_name(const std::string& name){
    return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end();
}

bool TRTInferImpl::is_input_name(const std::string& name){
    return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end();
}

// Run one inference pass. The batch size is taken from input 0; output tensors
// are resized to match before enqueueV2 is issued on the context's stream.
void TRTInferImpl::forward(bool sync) {

    EngineContext* context = (EngineContext*)context_.get();
    int inputBatchSize = inputs_[0]->size(0);
    for(int i = 0; i < context->engine_->getNbBindings(); ++i){
        auto dims = context->engine_->getBindingDimensions(i);
        auto type = context->engine_->getBindingDataType(i);
        dims.d[0] = inputBatchSize;
        if(context->engine_->bindingIsInput(i)){
            context->context_->setBindingDimensions(i, dims);
        }
    }

    for (int i = 0; i < outputs_.size(); ++i) {
        outputs_[i]->resize_single_dim(0, inputBatchSize);
        outputs_[i]->to_gpu(false);
    }

    for (int i = 0; i < orderdBlobs_.size(); ++i)
        bindingsPtr_[i] = orderdBlobs_[i]->gpu();

    void** bindingsptr = bindingsPtr_.data();
    //bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr);
    bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr);
    if(!execute_result){
        auto code = cudaGetLastError();
        INFOF("execute fail, code %d[%s], message %s", code, cudaGetErrorName(code), cudaGetErrorString(code));
    }

    if (sync) {
        synchronize();
    }
}

std::shared_ptr<MixMemory> TRTInferImpl::get_workspace() {
    return workspace_;
}

int TRTInferImpl::num_input() {
    return this->inputs_.size();
}

int TRTInferImpl::num_output() {
    return this->outputs_.size();
}

// Swap in a caller-provided tensor for an input binding (also updates orderdBlobs_).
void TRTInferImpl::set_input (int index, std::shared_ptr<Tensor> tensor){
    Assert(index >= 0 && index < inputs_.size());
    this->inputs_[index] = tensor;

    int order_index = inputs_map_to_ordered_index_[index];
    this->orderdBlobs_[order_index] = tensor;
}

// Swap in a caller-provided tensor for an output binding (also updates orderdBlobs_).
void TRTInferImpl::set_output(int index, std::shared_ptr<Tensor> tensor){
    Assert(index >= 0 && index < outputs_.size());
    this->outputs_[index] = tensor;

    int order_index = outputs_map_to_ordered_index_[index];
    this->orderdBlobs_[order_index] = tensor;
}

std::shared_ptr<Tensor> TRTInferImpl::input(int index) {
    Assert(index >= 0 && index < inputs_name_.size());
    return this->inputs_[index];
}

std::string TRTInferImpl::get_input_name(int index){
    Assert(index >= 0 && index < inputs_name_.size());
    return inputs_name_[index];
}

std::shared_ptr<Tensor> TRTInferImpl::output(int index) {
    Assert(index >= 0 && index < outputs_.size());
    return outputs_[index];
}

std::string TRTInferImpl::get_output_name(int index){
    Assert(index >= 0 && index < outputs_name_.size());
    return outputs_name_[index];
}

int TRTInferImpl::get_max_batch_size() {
    Assert(this->context_ != nullptr);
    return this->context_->engine_->getMaxBatchSize();
}

std::shared_ptr<Tensor> TRTInferImpl::tensor(const std::string& name) {
    Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end());
    return orderdBlobs_[blobsNameMapper_[name]];
}

// Factory: load an engine file; returns nullptr on failure.
std::shared_ptr<TRTInferImpl> load_infer(const string& file) {

    std::shared_ptr<TRTInferImpl> infer(new TRTInferImpl());
    if (!infer->load(file))
        infer.reset();
    return infer;
}

//////////////////////////////class MonopolyAllocator//////////////////////////////////////
/*
独占分配器
通过对tensor做独占管理,具有max_batch * 2个tensor,通过query获取一个
当推理结束后,该tensor释放使用权,即可交给下一个图像使用,内存实现复用
*/
// (Translation of the note above: exclusive allocator — tensors are exclusively
// managed; there are max_batch * 2 of them and query() hands one out. When
// inference on it finishes, release() returns it so the next image can reuse
// the memory.)
template<class _ItemType>
class MonopolyAllocator{
public:
    // Handle to one exclusively-held item; release() returns it to the pool.
    class MonopolyData{
    public:
        std::shared_ptr<_ItemType>& data(){ return data_; }
        void release(){manager_->release_one(this);}

    private:
        MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;}

    private:
        friend class MonopolyAllocator;
        MonopolyAllocator* manager_ = nullptr;
        std::shared_ptr<_ItemType> data_;
        bool available_ = true;    // guarded by the allocator's lock_
    };
    typedef std::shared_ptr<MonopolyData> MonopolyDataPointer;

    MonopolyAllocator(int size){
        capacity_ = size;
        num_available_ = size;
        datas_.resize(size);

        for(int i = 0; i < size; ++i)
            datas_[i] = std::shared_ptr<MonopolyData>(new MonopolyData(this));
    }

    virtual ~MonopolyAllocator(){
        run_ = false;
        cv_.notify_all();

        // Wait until every thread blocked in query() has observed the shutdown.
        std::unique_lock<std::mutex> l(lock_);
        cv_exit_.wait(l, [&](){
            return num_wait_thread_ == 0;
        });
    }

    // Block up to `timeout` ms for a free item; returns nullptr on timeout or shutdown.
    MonopolyDataPointer query(int timeout = 10000){

        std::unique_lock<std::mutex> l(lock_);
        if(!run_) return nullptr;

        if(num_available_ == 0){
            num_wait_thread_++;

            auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){
                return num_available_ > 0 || !run_;
            });

            num_wait_thread_--;
            cv_exit_.notify_one();

            // timeout, no available, exit program
            if(!state || num_available_ == 0 || !run_)
                return nullptr;
        }

        auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;});
        if(item == datas_.end())
            return nullptr;

        (*item)->available_ = false;
        num_available_--;
        return *item;
    }

    int num_available(){
        return num_available_;
    }

    int capacity(){
        return capacity_;
    }

private:
    // Called from MonopolyData::release(): mark the item free, wake one waiter.
    void release_one(MonopolyData* prq){
        std::unique_lock<std::mutex> l(lock_);
        if(!prq->available_){
            prq->available_ = true;
            num_available_++;
            cv_.notify_one();
        }
    }

private:
    std::mutex lock_;
    std::condition_variable cv_;
    std::condition_variable cv_exit_;     // signals "a waiter has left query()"
    std::vector<MonopolyDataPointer> datas_;
    int capacity_ = 0;
    volatile int num_available_ = 0;
    volatile int num_wait_thread_ = 0;
    volatile bool run_ = true;
};

/////////////////////////////////////////class ThreadSafedAsyncInfer/////////////////////////////////////////////
/* Asynchronous, thread-safe inference wrapper.
   A worker thread is started asynchronously; callers on any thread may submit
   images as input and collect the asynchronous result through a future. */
template<class Input, class Output, class StartParam=std::tuple<std::string, int>, class JobAdditional=int>
class ThreadSafedAsyncInfer{
public:
    // One queued unit of work: input, eventual output, per-job extra data, the
    // exclusively-held input tensor, and the promise the caller waits on.
    struct Job{
        Input input;
        Output output;
        JobAdditional additional;
        MonopolyAllocator<Tensor>::MonopolyDataPointer mono_tensor;
        std::shared_ptr<std::promise<Output>> pro;
    };

    virtual ~ThreadSafedAsyncInfer(){
        stop();
    }

    // Stop the worker and fulfil every pending promise with a default Output.
    void stop(){
        run_ = false;
        cond_.notify_all();

        ////////////////////////////////////////// cleanup jobs
        {
            std::unique_lock<std::mutex> l(jobs_lock_);
            while(!jobs_.empty()){
                auto& item = jobs_.front();
                if(item.pro)
                    item.pro->set_value(Output());
                jobs_.pop();
            }
        };

        if(worker_){
            worker_->join();
            worker_.reset();
        }
    }

    // Launch the worker thread; blocks until it reports startup success/failure.
    bool startup(const StartParam& param){
        run_ = true;

        std::promise<bool> pro;
        start_param_ = param;
        worker_      = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro));
        return pro.get_future().get();
    }

    // Queue a single input; the returned future resolves when inference completes
    // (or immediately with a default Output when preprocessing fails).
    virtual std::shared_future<Output> commit(const Input& input){

        Job job;
        job.pro = std::make_shared<std::promise<Output>>();
        if(!preprocess(job, input)){
            job.pro->set_value(Output());
            return job.pro->get_future();
        }

        ///////////////////////////////////////////////////////////
        {
            std::unique_lock<std::mutex> l(jobs_lock_);
            jobs_.push(job);
        };
        cond_.notify_one();
        return job.pro->get_future();
    }

    // Queue a batch; inputs are preprocessed and enqueued in allocator-capacity
    // sized epochs so preprocessing never outruns the tensor pool.
    virtual std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){

        int batch_size = std::min((int)inputs.size(), this->tensor_allocator_->capacity());
        std::vector<Job> jobs(inputs.size());
        std::vector<std::shared_future<Output>> results(inputs.size());

        int nepoch = (inputs.size() + batch_size - 1) / batch_size;
        for(int epoch = 0; epoch < nepoch; ++epoch){
            int begin = epoch * batch_size;
            int end   = std::min((int)inputs.size(), begin + batch_size);

            for(int i = begin; i < end; ++i){
                Job& job = jobs[i];
                job.pro = std::make_shared<std::promise<Output>>();
                if(!preprocess(job, inputs[i])){
                    job.pro->set_value(Output());
                }
                results[i] = job.pro->get_future();
            }

            ///////////////////////////////////////////////////////////
            {
                std::unique_lock<std::mutex> l(jobs_lock_);
                for(int i = begin; i < end; ++i){
                    jobs_.emplace(std::move(jobs[i]));
                };
            }
            cond_.notify_one();
        }
        return results;
    }

protected:
    virtual void worker(std::promise<bool>& result) = 0;
    virtual bool preprocess(Job& job, const Input& input) = 0;

    // Wait for work, then drain up to max_size jobs; false when shutting down.
    virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){

        std::unique_lock<std::mutex> l(jobs_lock_);
        cond_.wait(l, [&](){
            return !run_ || !jobs_.empty();
        });

        if(!run_) return false;

        fetch_jobs.clear();
        for(int i = 0; i < max_size && !jobs_.empty(); ++i){
            fetch_jobs.emplace_back(std::move(jobs_.front()));
            jobs_.pop();
        }
        return true;
    }

    // Wait for work and take a single job; false when shutting down.
    virtual bool get_job_and_wait(Job& fetch_job){

        std::unique_lock<std::mutex> l(jobs_lock_);
        cond_.wait(l, [&](){
            return !run_ || !jobs_.empty();
        });

        if(!run_) return false;

        fetch_job = std::move(jobs_.front());
        jobs_.pop();
        return true;
    }

protected:
    StartParam start_param_;
    std::atomic<bool> run_;
    std::mutex jobs_lock_;
    std::queue<Job> jobs_;
    std::shared_ptr<std::thread> worker_;
    std::condition_variable cond_;
    std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_;
};

///////////////////////////////////class YoloTRTInferImpl//////////////////////////////////////
/* Concrete Yolo implementation.
   Using the classes above it overlaps preprocessing with computation and allows
   asynchronous cross-thread calls, finally stitching several images into one
   batch for inference — maximizing GPU utilization for high-performance,
   highly-available, easy-to-use yolo inference. */
const char* type_name(Type type){
    switch(type){
    case Type::V5: return "YoloV5";
    case Type::X: return "YoloX";
    default: return "Unknow";
    }
}

// Letterbox-style affine mapping between the original image and the network
// input, kept in both directions (forward i2d and inverse d2i).
struct AffineMatrix{
    float i2d[6];       // image to dst(network), 2x3 matrix
    float d2i[6];       // dst to image, 2x3 matrix

    void compute(const cv::Size& from, const cv::Size& to){
        float scale_x = to.width / (float)from.width;
        float scale_y = to.height / (float)from.height;
        float scale = std::min(scale_x, scale_y);   // keep aspect ratio

        i2d[0] = scale;  i2d[1] = 0;
i2d[2] = -scale * from.width * 0.5 + to.width * 0.5 + scale * 0.5 - 0.5;
        i2d[3] = 0;  i2d[4] = scale;
        i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5;

        cv::Mat m2x3_i2d(2, 3, CV_32F, i2d);
        cv::Mat m2x3_d2i(2, 3, CV_32F, d2i);
        cv::invertAffineTransform(m2x3_i2d, m2x3_d2i);
    }

    cv::Mat i2d_mat(){
        return cv::Mat(2, 3, CV_32F, i2d);
    }
};

using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer
<
    cv::Mat,                // input
    BoxArray,               // output
    tuple<string, int>,     // start param
    AffineMatrix            // additional
>;
class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{
public:

    /** stop() must be executed from this concrete class, not from the base class **/
    virtual ~YoloTRTInferImpl(){
        stop();
    }

    // Configure normalization for the model type, then start the worker thread.
    virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){

        if(type == Type::V5){
            normalize_ = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB);
        }else if(type == Type::X){
            //float mean[] = {0.485, 0.456, 0.406};
            //float std[] = {0.229, 0.224, 0.225};
            //normalize_ = Norm::mean_std(mean, std, 1/255.0f, ChannelType::Invert);
            normalize_ = Norm::None();
        }else{
            INFOE("Unsupport type %d", type);
        }

        confidence_threshold_ = confidence_threshold;
        nms_threshold_        = nms_threshold;
        return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid));
    }

    // Worker thread: loads the engine, then repeatedly batches queued jobs,
    // runs inference, decodes boxes on the GPU, and fulfils callers' promises.
    virtual void worker(promise<bool>& result) override{

        string file = get<0>(start_param_);
        int gpuid   = get<1>(start_param_);

        set_device(gpuid);
        auto engine = load_infer(file);
        if(engine == nullptr){
            INFOE("Engine %s load failed", file.c_str());
            result.set_value(false);
            return;
        }

        engine->print();

        const int MAX_IMAGE_BBOX = 1024;
        const int NUM_BOX_ELEMENT = 7;      // left, top, right, bottom, confidence, class, keepflag
        Tensor affin_matrix_device;
        Tensor output_array_device;
        int max_batch_size = engine->get_max_batch_size();
        auto input         = engine->tensor("images");
        auto output        = engine->tensor("output");
        int num_classes    = output->size(2) - 5;

        input_width_       = input->size(3);
        input_height_      = input->size(2);
        tensor_allocator_  = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2);
        stream_            = engine->get_stream();
        gpu_               = gpuid;
        result.set_value(true);    // signal successful startup before entering the loop

        input->resize_single_dim(0, max_batch_size).to_gpu();
        affin_matrix_device.set_stream(stream_);

        // 8 values per row so that 8 * sizeof(float) % 32 == 0 (alignment)
        affin_matrix_device.resize(max_batch_size, 8).to_gpu();

        // row layout is: counter + bboxes ... (hence 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT)
        output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu();

        vector<Job> fetch_jobs;
        while(get_jobs_and_wait(fetch_jobs, max_batch_size)){

            int infer_batch_size = fetch_jobs.size();
            input->resize_single_dim(0, infer_batch_size);

            // Gather each job's preprocessed image and affine matrix into the batch.
            for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                auto& job  = fetch_jobs[ibatch];
                auto& mono = job.mono_tensor->data();
                affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6);
                input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count());
                job.mono_tensor->release();   // hand the tensor back to the pool ASAP
            }

            engine->forward(false);
            output_array_device.to_gpu(false);
            for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){

                auto& job                 = fetch_jobs[ibatch];
                float* image_based_output = output->gpu<float>(ibatch);
                float* output_array_ptr   = output_array_device.gpu<float>(ibatch);
                auto affine_matrix        = affin_matrix_device.gpu<float>(ibatch);
                // zero the per-image box counter before decoding
                checkCudaRuntime(cudaMemsetAsync(output_array_ptr, 0, sizeof(int), stream_));
                decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_);
            }

            output_array_device.to_cpu();
            for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                float* parray = output_array_device.cpu<float>(ibatch);
                int count     = min(MAX_IMAGE_BBOX, (int)*parray);
                auto& job     = fetch_jobs[ibatch];
                auto& image_based_boxes = job.output;
                for(int i = 0; i < count; ++i){
                    float* pbox  = parray + 1 + i * NUM_BOX_ELEMENT;
                    int label    = pbox[5];
                    int keepflag = pbox[6];
                    if(keepflag == 1){
                        // box survived NMS
                        image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label);
                    }
                }
                job.pro->set_value(image_based_boxes);
            }
            fetch_jobs.clear();
        }
        stream_ = nullptr;
        tensor_allocator_.reset();
        INFO("Engine destroy.");
    }

    // Preprocess one image on the worker's GPU: compute the affine matrix,
    // stage image + matrix in pinned host memory, upload both, then warp and
    // normalize into the job's exclusive input tensor. All copies are async on
    // stream_; the worker consumes them on the same stream.
    virtual bool preprocess(Job& job, const Mat& image) override{

        if(tensor_allocator_ == nullptr){
            INFOE("tensor_allocator_ is nullptr");
            return false;
        }

        job.mono_tensor = tensor_allocator_->query();
        if(job.mono_tensor == nullptr){
            INFOE("Tensor allocator query failed.");
            return false;
        }

        AutoDevice auto_device(gpu_);
        auto& tensor = job.mono_tensor->data();
        if(tensor == nullptr){
            // not init
            tensor = make_shared<Tensor>();
            tensor->set_workspace(make_shared<MixMemory>());
        }

        Size input_size(input_width_, input_height_);
        job.additional.compute(image.size(), input_size);

        tensor->set_stream(stream_);
        tensor->resize(1, 3, input_height_, input_width_);

        size_t size_image  = image.cols * image.rows * 3;
        size_t size_matrix = upbound(sizeof(job.additional.d2i), 32);   // keep image region aligned
        auto workspace     = tensor->get_workspace();
        uint8_t* gpu_workspace        = (uint8_t*)workspace->gpu(size_matrix + size_image);
        float*   affine_matrix_device = (float*)gpu_workspace;
        uint8_t* image_device         = size_matrix + gpu_workspace;

        uint8_t* cpu_workspace        = (uint8_t*)workspace->cpu(size_matrix + size_image);
        float*   affine_matrix_host   = (float*)cpu_workspace;
        uint8_t* image_host           = size_matrix + cpu_workspace;

        //checkCudaRuntime(cudaMemcpyAsync(image_host, image.data, size_image, cudaMemcpyHostToHost, stream_));
        // speed up
        memcpy(image_host, image.data, size_image);
        memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i));
        checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream_));
        checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), cudaMemcpyHostToDevice, stream_));

        warp_affine_bilinear_and_normalize_plane(
            image_device,         image.cols * 3,    image.cols,    image.rows,
            tensor->gpu<float>(), input_width_,      input_height_,
            affine_matrix_device, 114,
            normalize_,           stream_
        );
        return true;
    }

    virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{
        return ThreadSafedAsyncInferImpl::commits(images);
    }

    virtual std::shared_future<BoxArray> commit(const Mat& image) override{
        return ThreadSafedAsyncInferImpl::commit(image);
    }

private:
    int input_width_            = 0;
    int input_height_           = 0;
    int gpu_                    = 0;
    float confidence_threshold_ = 0;
    float nms_threshold_        = 0;
    cudaStream_t stream_        = nullptr;
    Norm normalize_;
};

// Upload `image` into batch slot `ibatch` of `tensor`, applying the same
// letterbox warp + normalization the online pipeline uses (offline helper).
void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){

    Norm normalize;
    if(type == Type::V5){
        normalize = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB);
    }else if(type == Type::X){
        //float mean[] = {0.485, 0.456, 0.406};
        //float std[] = {0.229, 0.224, 0.225};
        //normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1/255.0f, CUDAKernel::ChannelType::Invert);
        normalize = Norm::None();
    }else{
        INFOE("Unsupport type %d", type);
    }

    Size input_size(tensor->size(3), tensor->size(2));
    AffineMatrix affine;
    affine.compute(image.size(), input_size);

    size_t size_image  = image.cols * image.rows * 3;
    size_t size_matrix = upbound(sizeof(affine.d2i), 32);
    auto workspace     = tensor->get_workspace();
    uint8_t* gpu_workspace        = (uint8_t*)workspace->gpu(size_matrix + size_image);
    float*   affine_matrix_device = (float*)gpu_workspace;
    uint8_t* image_device         = size_matrix + gpu_workspace;

    uint8_t* cpu_workspace        = (uint8_t*)workspace->cpu(size_matrix + size_image);
    float*   affine_matrix_host   = (float*)cpu_workspace;
    uint8_t* image_host           = size_matrix + cpu_workspace;

    auto stream = tensor->get_stream();
    memcpy(image_host, image.data, size_image);
    memcpy(affine_matrix_host, affine.d2i, sizeof(affine.d2i));
    checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream));
    checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), cudaMemcpyHostToDevice, stream));

    warp_affine_bilinear_and_normalize_plane(
        image_device,               image.cols * 3,     image.cols,         image.rows,
        tensor->gpu<float>(ibatch), input_size.width,   input_size.height,
        affine_matrix_device, 114,
        normalize,                  stream
    );
}

// Factory: create and start a Yolo inferencer; returns nullptr when startup fails.
shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, float nms_threshold){
    shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl());
    if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){
        instance.reset();
    }
    return instance;
}

//////////////////////////////////////Compile Model/////////////////////////////////////////////////////////////

const char* mode_string(Mode type) {
    switch (type) {
    case Mode::FP32:
        return "FP32";
    case Mode::FP16:
        return "FP16";
    case Mode::INT8:
        return "INT8";
    default:
        return "UnknowCompileMode";
    }
}

// Callback that fills `tensor` from a batch of image files during INT8 calibration.
typedef std::function<void(int current, int count, const std::vector<std::string>& files, std::shared_ptr<Tensor>& tensor)> Int8Process;

// INT8 entropy calibrator: either feeds preprocessed image batches to TensorRT
// or replays a previously captured calibration cache.
class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{
public:
    Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) {

        Assert(preprocess != nullptr);
        this->dims_ = dims;
        this->allimgs_ = imagefiles;
        this->preprocess_ = preprocess;
        this->fromCalibratorData_ = false;
        files_.resize(dims.d[0]);
        checkCudaRuntime(cudaStreamCreate(&stream_));
    }

    // Construct from a saved calibration cache instead of images.
    Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) {

        Assert(preprocess != nullptr);
        this->dims_ = dims;
        this->entropyCalibratorData_ = entropyCalibratorData;
        this->preprocess_ = preprocess;
        this->fromCalibratorData_ = true;
        files_.resize(dims.d[0]);
        checkCudaRuntime(cudaStreamCreate(&stream_));
    }

    virtual ~Int8EntropyCalibrator(){
        checkCudaRuntime(cudaStreamDestroy(stream_));
    }

    int getBatchSize() const noexcept {
        return dims_.d[0];    // batch size is the first calibration dimension
    }

    // Advance the cursor by one batch; false when the image list is exhausted.
    bool next() {
        int batch_size = dims_.d[0];
        if (cursor_ + batch_size > allimgs_.size())
            return false;

        int
old_cursor = cursor_; for(int i = 0; i < batch_size; ++i) files_[i] = allimgs_[cursor_++]; if (!tensor_){ tensor_.reset(new Tensor(dims_.nbDims, dims_.d)); tensor_->set_stream(stream_); tensor_->set_workspace(make_shared<MixMemory>()); } preprocess_(old_cursor, allimgs_.size(), files_, tensor_); return true; } bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept { if (!next()) return false; bindings[0] = tensor_->gpu(); return true; } const vector<uint8_t>& getEntropyCalibratorData() { return entropyCalibratorData_; } const void* readCalibrationCache(size_t& length) noexcept { if (fromCalibratorData_) { length = this->entropyCalibratorData_.size(); return this->entropyCalibratorData_.data(); } length = 0; return nullptr; } virtual void writeCalibrationCache(const void* cache, size_t length) noexcept { entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length); } private: Int8Process preprocess_; vector<string> allimgs_; size_t batchCudaSize_ = 0; int cursor_ = 0; nvinfer1::Dims dims_; vector<string> files_; shared_ptr<Tensor> tensor_; vector<uint8_t> entropyCalibratorData_; bool fromCalibratorData_ = false; cudaStream_t stream_ = nullptr; }; bool compile( Mode mode, Type type, unsigned int max_batch_size, const string& source_onnx, const string& saveto, size_t max_workspace_size, const std::string& int8_images_folder, const std::string& int8_entropy_calibrator_cache_file) { bool hasEntropyCalibrator = false; vector<uint8_t> entropyCalibratorData; vector<string> entropyCalibratorFiles; auto int8process = [=](int current, int count, const vector<string>& files, shared_ptr<Tensor>& tensor){ for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; INFO("Int8 load %d / %d, %s", current + i + 1, count, file.c_str()); auto image = cv::imread(file); if(image.empty()){ INFOE("Load image failed, %s", file.c_str()); continue; } image_to_tensor(image, tensor, type, i); } tensor->synchronize(); }; if (mode == Mode::INT8) { if 
(!int8_entropy_calibrator_cache_file.empty()) { if (exists(int8_entropy_calibrator_cache_file)) { entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file); if (entropyCalibratorData.empty()) { INFOE("entropyCalibratorFile is set as: %s, but we read is empty.", int8_entropy_calibrator_cache_file.c_str()); return false; } hasEntropyCalibrator = true; } } if (hasEntropyCalibrator) { if (!int8_images_folder.empty()) { INFOW("int8_images_folder is ignore, when int8_entropy_calibrator_cache_file is set"); } } else { entropyCalibratorFiles = glob_image_files(int8_images_folder); if (entropyCalibratorFiles.empty()) { INFOE("Can not find any images(jpg/png/bmp/jpeg/tiff) from directory: %s", int8_images_folder.c_str()); return false; } if(entropyCalibratorFiles.size() < max_batch_size){ INFOW("Too few images provided, %d[provided] < %d[max batch size], image copy will be performed", entropyCalibratorFiles.size(), max_batch_size); int old_size = entropyCalibratorFiles.size(); for(int i = old_size; i < max_batch_size; ++i) entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % old_size]); } } } else { if (hasEntropyCalibrator) { INFOW("int8_entropy_calibrator_cache_file is ignore, when Mode is '%s'", mode_string(mode)); } } INFO("Compile %s %s.", mode_string(mode), source_onnx.c_str()); shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroy_nvidia_pointer<IBuilder>); if (builder == nullptr) { INFOE("Can not create builder."); return false; } shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroy_nvidia_pointer<IBuilderConfig>); if (mode == Mode::FP16) { if (!builder->platformHasFastFp16()) { INFOW("Platform not have fast fp16 support"); } config->setFlag(BuilderFlag::kFP16); } else if (mode == Mode::INT8) { if (!builder->platformHasFastInt8()) { INFOW("Platform not have fast int8 support"); } config->setFlag(BuilderFlag::kINT8); } shared_ptr<INetworkDefinition> network; shared_ptr<nvonnxparser::IParser> onnxParser; const auto 
explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer<INetworkDefinition>); //from onnx is not markOutput onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer<nvonnxparser::IParser>); if (onnxParser == nullptr) { INFOE("Can not create parser."); return false; } if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) { INFOE("Can not parse OnnX file: %s", source_onnx.c_str()); return false; } auto inputTensor = network->getInput(0); auto inputDims = inputTensor->getDimensions(); shared_ptr<Int8EntropyCalibrator> int8Calibrator; if (mode == Mode::INT8) { auto calibratorDims = inputDims; calibratorDims.d[0] = max_batch_size; if (hasEntropyCalibrator) { INFO("Using exist entropy calibrator data[%d bytes]: %s", entropyCalibratorData.size(), int8_entropy_calibrator_cache_file.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorData, calibratorDims, int8process )); } else { INFO("Using image list[%d files]: %s", entropyCalibratorFiles.size(), int8_images_folder.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorFiles, calibratorDims, int8process )); } config->setInt8Calibrator(int8Calibrator.get()); } INFO("Input shape is %s", join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims)).c_str()); INFO("Set max batch size = %d", max_batch_size); INFO("Set max workspace size = %.2f MB", max_workspace_size / 1024.0f / 1024.0f); int net_num_input = network->getNbInputs(); INFO("Network has %d inputs:", net_num_input); vector<string> input_names(net_num_input); for(int i = 0; i < net_num_input; ++i){ auto tensor = network->getInput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); input_names[i] = 
tensor->getName(); } int net_num_output = network->getNbOutputs(); INFO("Network has %d outputs:", net_num_output); for(int i = 0; i < net_num_output; ++i){ auto tensor = network->getOutput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); } int net_num_layers = network->getNbLayers(); INFO("Network has %d layers", net_num_layers); builder->setMaxBatchSize(max_batch_size); config->setMaxWorkspaceSize(max_workspace_size); auto profile = builder->createOptimizationProfile(); for(int i = 0; i < net_num_input; ++i){ auto input = network->getInput(i); auto input_dims = input->getDimensions(); input_dims.d[0] = 1; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims); profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims); input_dims.d[0] = max_batch_size; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims); } config->addOptimizationProfile(profile); INFO("Building engine..."); auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>); if (engine == nullptr) { INFOE("engine is nullptr"); return false; } if (mode == Mode::INT8) { if (!hasEntropyCalibrator) { if (!int8_entropy_calibrator_cache_file.empty()) { INFO("Save calibrator to: %s", int8_entropy_calibrator_cache_file.c_str()); save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData()); } else { INFO("No set entropyCalibratorFile, and entropyCalibrator will not save."); } } } auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); INFO("Build done %lld ms !", time_end - time_start); // serialize the engine, then close 
everything down shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>); return save_file(saveto, seridata->data(), seridata->size()); } };
the_stack
\brief Defines a math function */ #include <algorithm> #include <stdexcept> #include <iomanip> #include <cstring> #include <fstream> #include <sstream> #ifdef __unix__ #include <unistd.h> #elif defined(_WIN32) || defined(WIN32) #include <windows.h> #else // sleep not supported #endif #include "options.h" #include "operation_profiler.h" #include "gpu_timer.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { /////////////////////////////////////////////////////////////////////////////////////////////////// OperationProfiler::OperationProfiler(): kind_(library::OperationKind::kInvalid) { } /// Ctor OperationProfiler::OperationProfiler( Options const &options, library::OperationKind kind, ArgumentDescriptionVector const &arguments, ProviderVector const & verification_providers ): kind_(kind), arguments_(arguments) { ArgumentDescriptionVector tile_description_arguments{ {ArgumentTypeID::kEnumerated, {"op_class", "opcode-class"}, "Class of math instruction (simt, tensorop, wmmatensorop, wmma)"}, {ArgumentTypeID::kEnumerated, {"accum", "accumulator-type"}, "Math instruction accumulator data type"}, {ArgumentTypeID::kInteger, {"cta_m", "threadblock-shape::m"}, "Threadblock shape in the M dimension"}, {ArgumentTypeID::kInteger, {"cta_n", "threadblock-shape::n"}, "Threadblock shape in the N dimension"}, {ArgumentTypeID::kInteger, {"cta_k", "threadblock-shape::k"}, "Threadblock shape in the K dimension"}, {ArgumentTypeID::kInteger, {"stages", "threadblock-stages"}, "Number of stages of threadblock-scoped matrix multiply"}, {ArgumentTypeID::kInteger, {"warps_m", "warp-count::m"}, "Number of warps within threadblock along the M dimension"}, {ArgumentTypeID::kInteger, {"warps_n", "warp-count::n"}, "Number of warps within threadblock along the N dimension"}, {ArgumentTypeID::kInteger, {"warps_k", "warp-count::k"}, "Number of warps within threadblock along the K dimension"}, 
{ArgumentTypeID::kInteger, {"inst_m", "instruction-shape::m"}, "Math instruction shape in the M dimension"}, {ArgumentTypeID::kInteger, {"inst_n", "instruction-shape::n"}, "Math instruction shape in the N dimension"}, {ArgumentTypeID::kInteger, {"inst_k", "instruction-shape::k"}, "Math instruction shape in the K dimension"}, {ArgumentTypeID::kInteger, {"min_cc", "minimum-compute-capability"}, "Minimum device compute capability"}, {ArgumentTypeID::kInteger, {"max_cc", "maximum-compute-capability"}, "Maximum device compute capability"} }; arguments_.insert(arguments_.end(), tile_description_arguments.begin(), tile_description_arguments.end()); for (auto provider : verification_providers) { if (std::find( options.verification.providers.begin(), options.verification.providers.end(), provider) != options.verification.providers.end()) { verification_providers_.push_back(provider); } } } /// Destructor OperationProfiler::~OperationProfiler() { } /// Gets the schema description std::string const & OperationProfiler::description() const { return description_; } /// Prints usage statement for the math function void OperationProfiler::print_usage(std::ostream &out) const { for (auto const & desc : arguments_) { size_t const kAliasStart = 10; size_t columns = 0; std::string type_str = to_string(desc.type); columns += type_str.size(); out << " [" << type_str << "]"; if (columns < kAliasStart) { out << std::string(kAliasStart - columns, ' '); } columns = 0; int j = 0; for (auto const & alias : desc.aliases) { columns += alias.size() + (j ? 1 : 0) + 2; out << (j++ ? 
"," : "") << "--" << alias; } size_t const kTotalColumns = 50; if (columns < kTotalColumns) { out << std::string(kTotalColumns - columns, ' '); } out << desc.description << "\n"; } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if the current operation description satisfies the problem space bool OperationProfiler::satisfies( library::OperationDescription const &op_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::OpcodeClassID opcode_class; if (arg_as_OpcodeClassID(opcode_class, "op_class", problem_space, problem)) { if (opcode_class != op_desc.tile_description.math_instruction.opcode_class) { return false; } } int64_t int_value; if (arg_as_int(int_value, "inst_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "stages", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_stages) != int_value) { return false; } } if (arg_as_int(int_value, "warps_m", problem_space, problem)) { if 
(int64_t(op_desc.tile_description.warp_count.m()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.n()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.k()) != int_value) { return false; } } library::NumericTypeID numeric_type; if (arg_as_NumericTypeID(numeric_type, "accum", problem_space, problem)) { if (numeric_type != op_desc.tile_description.math_instruction.element_accumulator) { return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to profile all operations in the manifest int OperationProfiler::profile_all( Options const &options, library::Manifest const &manifest, DeviceContext &device_context) { ProblemSpace problem_space(arguments_, options.cmdline); // 1. Construct performance report PerformanceReport report(options, problem_space.argument_names(), kind_); // 2. 
For each problem in problem space ProblemSpace::Iterator problem_it = problem_space.begin(); ProblemSpace::Iterator problem_end = problem_space.end(); bool continue_profiling = true, internal_error = false; // For each problem in problem space for (; continue_profiling && problem_it != problem_end; ++problem_it) { ProblemSpace::Problem problem = problem_it.at(); report.next_problem(); // For each operation in manifest for (auto const & operation_ptr : manifest) { library::Operation const *operation = operation_ptr.get(); auto min_cc = operation->description().tile_description.minimum_compute_capability; auto max_cc = operation->description().tile_description.maximum_compute_capability; // Execute compatible cutlass operations if they satisfy the current device's compute capability if (operation->description().kind == kind_ && operation->description().provider == library::Provider::kCUTLASS && options.device.compute_capability() >= min_cc && options.device.compute_capability() <= max_cc) { std::string operation_name(operation->description().name); // Filter kernels by name bool filtered_by_name = options.operation_names.empty(); if (!filtered_by_name) { for (auto const & op_name : options.operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = true; break; } } } for (auto const & op_name : options.excluded_operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = false; break; } } if (!filtered_by_name || !satisfies(operation->description(), problem_space, problem)) { continue; } // A. Initialize configuration Status status = this->initialize_configuration( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // Stop profiling if there was an internal error internal_error = true; break; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. 
continue; } if (continue_profiling) { status = this->initialize_workspace( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // Stop profiling if there was an internal error internal_error = true; break; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. continue; } } // // Profile CUTLASS if it is enabled // // B. Verify CUTLASS if (continue_profiling && options.profiling.provider_enabled(library::Provider::kCUTLASS)) { continue_profiling = this->verify_cutlass( options, report, device_context, operation, problem_space, problem); } if (options.execution_mode == ExecutionMode::kDryRun) { report.append_results(results_); results_.clear(); continue; } // // C. Optionally save workspace // if (options.verification.save_workspace == SaveWorkspace::kAlways) { save_workspace( device_context, options, operation->description(), library::Provider::kCUTLASS); } // // D. Profile // if (continue_profiling && options.profiling.enabled) { continue_profiling = this->profile( options, report, device_context, operation, problem_space, problem); } // Clear named allocations device_context.free(); report.append_results(results_); results_.clear(); } if (!continue_profiling) { break; } } } return internal_error ? 
1 : 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sleep for a given duration in ms void OperationProfiler::sleep(int sleep_duration) { if (sleep_duration) { #ifdef __unix__ usleep(sleep_duration * 1000); #elif defined(_WIN32) || defined(WIN32) SleepEx(sleep_duration, false); #else // sleep not supported #endif } } /// Compares tensors for equality Disposition OperationProfiler::compare_tensors( Options const &options, DeviceAllocation &experimental, DeviceAllocation &reference, int64_t count) { if (experimental.type() != reference.type()) { return Disposition::kIncorrect; } bool passed = false; if (count == 0) { count = reference.capacity(); } if (options.verification.epsilon == 0) { // bit-level equality passed = DeviceAllocation::block_compare_equal( experimental.type(), experimental.data(), reference.data(), count); } else { // relative error function passed = DeviceAllocation::block_compare_relatively_equal( experimental.type(), experimental.data(), reference.data(), count, options.verification.epsilon, options.verification.nonzero_floor); } return passed ? 
Disposition::kPassed : Disposition::kIncorrect; } /// Saves the workspace void OperationProfiler::save_workspace( DeviceContext &device_context, Options const &options, library::OperationDescription const &desc, library::Provider provider, library::Provider verification_provider) { for (auto const & named_allocation : device_context) { DeviceAllocation *allocation = named_allocation.second; std::stringstream filename; filename << desc.name << "_" << library::to_string(provider) << "_"; if (verification_provider != library::Provider::kInvalid) { filename << "verified_by_" << library::to_string(verification_provider) << "_"; } filename << named_allocation.first + ".mat"; std::ofstream out(filename.str()); allocation->write_tensor_csv(out); out << "\n"; if (options.report.verbose) { std::cout << "wrote '" << filename.str() << "'" << std::endl; } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Method to profile a CUTLASS Operation Status OperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return 
status; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sets operation description void OperationProfiler::initialize_result_( PerformanceResult &result, library::OperationDescription const &operation_desc, ProblemSpace const &problem_space) { set_argument(result, "op_class", problem_space, library::to_string(operation_desc.tile_description.math_instruction.opcode_class)); set_argument(result, "accum", problem_space, library::to_string(operation_desc.tile_description.math_instruction.element_accumulator)); set_argument(result, "cta_m", problem_space, operation_desc.tile_description.threadblock_shape.m()); set_argument(result, "cta_n", problem_space, operation_desc.tile_description.threadblock_shape.n()); set_argument(result, "cta_k", problem_space, operation_desc.tile_description.threadblock_shape.k()); set_argument(result, "stages", problem_space, operation_desc.tile_description.threadblock_stages); set_argument(result, "warps_m", problem_space, operation_desc.tile_description.warp_count.m()); set_argument(result, "warps_n", problem_space, operation_desc.tile_description.warp_count.n()); set_argument(result, "warps_k", problem_space, operation_desc.tile_description.warp_count.k()); set_argument(result, "inst_m", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.m()); set_argument(result, "inst_n", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.n()); set_argument(result, "inst_k", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.k()); set_argument(result, "min_cc", problem_space, operation_desc.tile_description.minimum_compute_capability); set_argument(result, "max_cc", problem_space, operation_desc.tile_description.maximum_compute_capability); } /// Helper void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, std::string const &value) { 
result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), value); } void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, int64_t value) { result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), library::lexical_cast(value)); } /// finds string matches filter_string in operation_name bool OperationProfiler::find_string_matches_( std::string const &filter_string, std::string const &operation_name) { // Returns true if all substrings appear in the operation_name in order // Split filter_string of the format "gemm*f32*nt" to tokens ["gemm", "f32", "nt"] std::string item; std::istringstream iss(filter_string); std::vector<std::string> filter_tokens; while (std::getline(iss, item, '*')) { filter_tokens.push_back(item); } // Search filter_tokens in operation_name in order size_t start = 0, idx = 0; for(auto & token : filter_tokens) { // Check if characters left to be parsed in operation_name if (start < operation_name.length()) { // Find token in operation_name[start:] idx = operation_name.substr(start).find(token); if (idx == std::string::npos) { return false; } } start += (idx + token.length()); } // All tokens in filter_string found in operation_name return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
the_stack
////////////////////////////////////////////////////////////////////////////
// B spline smoothing kernel
////////////////////////////////////////////////////////////////////////////
// Cubic B-spline kernel W(r, h) in 3D; returns 0 for r >= 2h.
__device__ double W(double3 p_pos, double3 q_pos, double h)
{
    // Radial distance between p and q
    double r = sqrt((p_pos.x-q_pos.x)*(p_pos.x-q_pos.x)
                  + (p_pos.y-q_pos.y)*(p_pos.y-q_pos.y)
                  + (p_pos.z-q_pos.z)*(p_pos.z-q_pos.z));

    double C = 1.0/(M_PI*h*h*h);  // 3D normalization constant
    double u = r/h;
    double val = 0.0;
    if(u >= 2.0)
        return val;
    else if(u < 1.0)
        val = 1.0 - (3.0/2.0)*u*u + (3.0/4.0)*u*u*u;
    else if(u >= 1.0 && u < 2.0)
        val = (1.0/4.0) * pow(2.0-u,3.0);
    val *= C;
    return val;
}

// Gradient of B spline kernel (scalar factor; callers multiply by the
// separation vector components).
__device__ double del_W(double3 p_pos, double3 q_pos, double h)
{
    double r = sqrt((p_pos.x-q_pos.x)*(p_pos.x-q_pos.x)
                  + (p_pos.y-q_pos.y)*(p_pos.y-q_pos.y)
                  + (p_pos.z-q_pos.z)*(p_pos.z-q_pos.z));

    double C = 1.0/(M_PI * h*h*h);
    double u = r/h;
    double val = 0.0;
    if(u >= 2.0)
        return val;
    else if(u < 1.0)
        val = -1.0/(h*h) * (3.0 - 9.0/4.0*u);
    else if(u >= 1.0 && u < 2.0)
        val = -3.0/(4.0*h*r) * pow(2.0-u,2.0);
    val *= C;
    return val;
}

////////////////////////////////////////////////////////////////////////////
// Boundary particle force
// http://iopscience.iop.org/0034-4885/68/8/R01/pdf/0034-4885_68_8_R01.pdf
////////////////////////////////////////////////////////////////////////////
// Repulsive force magnitude exerted on fluid particle p by boundary particle k
// with surface normal k_n (Monaghan-style boundary force).
__device__ double boundaryGamma(double3 p_pos, double3 k_pos, double3 k_n, double h, double speed_sound)
{
    // Radial distance between p,q
    double r = sqrt((p_pos.x-k_pos.x)*(p_pos.x-k_pos.x)
                  + (p_pos.y-k_pos.y)*(p_pos.y-k_pos.y)
                  + (p_pos.z-k_pos.z)*(p_pos.z-k_pos.z));

    // Distance to p normal to surface particle
    double y = sqrt((p_pos.x-k_pos.x)*(p_pos.x-k_pos.x)*(k_n.x*k_n.x)
                  + (p_pos.y-k_pos.y)*(p_pos.y-k_pos.y)*(k_n.y*k_n.y)
                  + (p_pos.z-k_pos.z)*(p_pos.z-k_pos.z)*(k_n.z*k_n.z));

    // Tangential distance
    double x = r-y;

    double u = y/h;

    // Tangential falloff: (1 - x/h) while x < h, zero beyond.
    // Fix: the original "(1-x/h)?x<h:0.0" had the ternary condition and value
    // swapped, yielding 0/1 instead of the falloff factor.
    double xi = (x < h) ? (1.0 - x/h) : 0.0;

    double C = xi*2.0*0.02 * speed_sound * speed_sound / y;

    double val = 0.0;
    if(u > 0.0 && u < 2.0/3.0)
        val = 2.0/3.0;
    else if(u < 1.0 && u > 2.0/3.0)
        val = (2*u - 3.0/2.0*u*u);
    else if (u < 2.0 && u > 1.0)
        val = 0.5*(2.0-u)*(2.0-u);
    else
        val = 0.0;

    val *= C;
    return val;
}

////////////////////////////////////////////////////////////////////////////
// Particle attribute computations
////////////////////////////////////////////////////////////////////////////

// Density-rate contribution of neighbor q to particle p (continuity equation),
// already scaled by the time step so callers can accumulate it directly.
__device__ double computeDensity(double3 p_pos, double3 p_v, double3 q_pos, double3 q_v, param *params)
{
    double v_x = (p_v.x - q_v.x);
    double v_y = (p_v.y - q_v.y);
    double v_z = (p_v.z - q_v.z);

    double density = params->mass_particle * del_W(p_pos,q_pos, params->smoothing_radius);
    double density_x = density * v_x * (p_pos.x - q_pos.x);
    double density_y = density * v_y * (p_pos.y - q_pos.y);
    double density_z = density * v_z * (p_pos.z - q_pos.z);
    density = (density_x + density_y + density_z)*params->time_step;

    return density;
}

// Tait equation of state: pressure from density (gamma = 7).
__device__ double computePressure(double p_density, param *params)
{
    double gam = 7.0;
    double B = params->rest_density * params->speed_sound*params->speed_sound / gam;
    double pressure = B * (pow((p_density/params->rest_density),gam) - 1.0);
    return pressure;
}

// One thread per fluid particle: accumulate density change from all fluid
// particles (O(N^2) brute-force neighbor search), then update pressure.
__global__ void updatePressures(fluid_particle *fluid_particles, param *params)
{
    int num_fluid_particles = params->number_fluid_particles;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_fluid_particles)
        return;

    double3 p_pos = fluid_particles[i].pos;
    double3 p_v = fluid_particles[i].v;
    double density = fluid_particles[i].density;

    // Note: includes j == i; the self-term is zero because the (p - q)
    // displacement vanishes.
    for(int j=0; j< num_fluid_particles; j++) {
        double3 q_pos = fluid_particles[j].pos;
        double3 q_v = fluid_particles[j].v;
        density += computeDensity(p_pos,p_v,q_pos,q_v, params);
    }

    fluid_particles[i].density = density;
    fluid_particles[i].pressure = computePressure(density, params);
}

// Acceleration on p from boundary particle k, directed along k's normal.
__device__ double3 computeBoundaryAcceleration(double3 p_pos, double3 k_pos, double3 k_n, double h, double speed_sound)
{
    double3 p_a;
    double bGamma = boundaryGamma(p_pos,k_pos,k_n,h,speed_sound);
    p_a.x = bGamma * k_n.x;
    p_a.y = bGamma * k_n.y;
    p_a.z = bGamma * k_n.z;
    return p_a;
}

// Pairwise fluid-fluid acceleration: pressure gradient, artificial viscosity
// (only for approaching pairs), and a simple surface-tension term.
__device__ double3 computeAcceleration(double3 p_pos, double3 p_v, double p_density, double p_pressure,
                                       double3 q_pos, double3 q_v, double q_density, double q_pressure,
                                       const param *const params)
{
    double3 a;
    double accel;
    double h = params->smoothing_radius;
    double alpha = params->alpha;
    double speed_sound = params->speed_sound;
    double mass_particle = params->mass_particle;
    double surface_tension = params->surface_tension;

    // Pressure force
    accel = (p_pressure/(p_density*p_density) + q_pressure/(q_density*q_density))
          * mass_particle * del_W(p_pos,q_pos,h);
    a.x = -accel * (p_pos.x - q_pos.x);
    a.y = -accel * (p_pos.y - q_pos.y);
    a.z = -accel * (p_pos.z - q_pos.z);

    // Viscosity force (Monaghan artificial viscosity; applied only when the
    // particles are approaching, i.e. v . r < 0)
    double VdotR = (p_v.x-q_v.x)*(p_pos.x-q_pos.x)
                 + (p_v.y-q_v.y)*(p_pos.y-q_pos.y)
                 + (p_v.z-q_v.z)*(p_pos.z-q_pos.z);
    if(VdotR < 0.0) {
        double nu = 2.0 * alpha * h * speed_sound / (p_density + q_density);
        double r2 = (p_pos.x-q_pos.x)*(p_pos.x-q_pos.x)
                  + (p_pos.y-q_pos.y)*(p_pos.y-q_pos.y)
                  + (p_pos.z-q_pos.z)*(p_pos.z-q_pos.z);
        double eps = h/10.0;  // softening to avoid singularity at r -> 0
        double stress = nu * VdotR / (r2 + eps*h*h);
        accel = mass_particle * stress * del_W(p_pos, q_pos, h);
        a.x += accel * (p_pos.x - q_pos.x);
        a.y += accel * (p_pos.y - q_pos.y);
        a.z += accel * (p_pos.z - q_pos.z);
    }

    //Surface tension
    // BT 07 http://cg.informatik.uni-freiburg.de/publications/2011_GRAPP_airBubbles.pdf
    accel = surface_tension * W(p_pos,q_pos,h);
    a.x += accel * (p_pos.x - q_pos.x);
    a.y += accel * (p_pos.y - q_pos.y);
    a.z += accel * (p_pos.z - q_pos.z);

    return a;
}

// Update particle acclerations from fluid-fluid interactions.
// One thread per fluid particle; gravity (-9.8 in z) is the base acceleration.
__global__ void updateAccelerationsFP(fluid_particle *fluid_particles, param *params)
{
    int num_fluid_particles = params->number_fluid_particles;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_fluid_particles)
        return;

    double ax = 0.0;
    double ay = 0.0;
    double az = -9.8;

    double3 p_pos = fluid_particles[i].pos;
    double3 p_v = fluid_particles[i].v;
    double p_density = fluid_particles[i].density;
    double p_pressure = fluid_particles[i].pressure;

    for(int j=0; j<num_fluid_particles; j++) {
        if (i!=j) {
            double3 q_pos = fluid_particles[j].pos;
            double3 q_v = fluid_particles[j].v;
            double q_density = fluid_particles[j].density;
            double q_pressure = fluid_particles[j].pressure;
            double3 tmp_a = computeAcceleration(p_pos, p_v, p_density, p_pressure,
                                                q_pos, q_v, q_density, q_pressure, params);
            ax += tmp_a.x;
            ay += tmp_a.y;
            az += tmp_a.z;
        }
    }

    fluid_particles[i].a.x = ax;
    fluid_particles[i].a.y = ay;
    fluid_particles[i].a.z = az;
}

// Add boundary-particle repulsion to each FLUID particle's acceleration.
// NOTE: threads index fluid particles (the loop runs over boundary particles),
// so the launch grid must cover number_fluid_particles.
__global__ void updateAccelerationsBP(fluid_particle *fluid_particles,
                                      boundary_particle *boundary_particles,
                                      param *params)
{
    int num_fluid_particles = params->number_fluid_particles;
    int num_boundary_particles = params->number_boundary_particles;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_fluid_particles)
        return;

    double ax = fluid_particles[i].a.x;
    double ay = fluid_particles[i].a.y;
    double az = fluid_particles[i].a.z;

    double3 p_pos = fluid_particles[i].pos;

    for (int j=0; j<num_boundary_particles; j++) {
        double3 k_pos = boundary_particles[j].pos;
        double3 k_n = boundary_particles[j].n;
        double3 tmp_a = computeBoundaryAcceleration(p_pos,k_pos,k_n,
                                                    params->smoothing_radius, params->speed_sound);
        ax += tmp_a.x;
        ay += tmp_a.y;
        az += tmp_a.z;
    }

    fluid_particles[i].a.x = ax;
    fluid_particles[i].a.y = ay;
    fluid_particles[i].a.z = az;
}

// Update particle positions
// Leap Frog integration with v(t+1) estimated
__global__ void updatePositions(fluid_particle *fluid_particles, param *params)
{
    double dt = params->time_step;
    int num_fluid_particles = params->number_fluid_particles;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num_fluid_particles)
        return;

    // Velocity at t + dt/2
    double3 v_half = fluid_particles[i].v_half;
    double3 v = fluid_particles[i].v;
    double3 pos = fluid_particles[i].pos;
    double3 a = fluid_particles[i].a;

    v_half.x = v_half.x + dt * a.x;
    v_half.y = v_half.y + dt * a.y;
    v_half.z = v_half.z + dt * a.z;

    // Velocity at t + dt, must estimate for foce calc
    v.x = v_half.x + a.x * (dt / 2.0);
    v.y = v_half.y + a.y * (dt / 2.0);
    v.z = v_half.z + a.z * (dt / 2.0);

    // Position at time t + dt
    pos.x = pos.x + dt * v_half.x;
    pos.y = pos.y + dt * v_half.y;
    pos.z = pos.z + dt * v_half.z;

    fluid_particles[i].v_half = v_half;
    fluid_particles[i].v = v;
    fluid_particles[i].pos = pos;
}

// Seed simulation with Euler step v(t-dt/2) needed by leap frog integrator
// Should calculate all accelerations but assuming just g simplifies acc port
// (boundary_particles is unused here; kept for interface compatibility)
void eulerStart(fluid_particle* fluid_particles, boundary_particle* boundary_particles, param *params)
{
    // Set V (t0 - dt/2)
    double dt_half = params->time_step/2.0;
    for(int i=0; i<params->number_fluid_particles; i++) {
        // Velocity at t + dt/2
        double3 v = fluid_particles[i].v;
        double3 v_half;
        v_half.x = v.x;
        v_half.y = v.y;
        v_half.z = v.z - params->g * dt_half;
        fluid_particles[i].v_half = v_half;
    }
}

// Initialize particles: allocate host arrays, zero fluid state, place fluid
// particles on a regular lattice inside the water AABB, and build the
// boundary box. May shrink params->number_fluid_particles to the count that
// actually fits the lattice.
void initParticles(fluid_particle** fluid_particles, boundary_particle** boundary_particles,
                   AABB* water, AABB* boundary, param* params)
{
    // Allocate fluid particles array
    *fluid_particles = (fluid_particle*) malloc(params->number_fluid_particles * sizeof(fluid_particle));

    // Allocate boundary particles array
    *boundary_particles = (boundary_particle*) malloc(params->number_boundary_particles * sizeof(boundary_particle));

    double spacing = params->spacing_particle;

    // Initialize particle values
    for(int i=0; i<params->number_fluid_particles; i++) {
        (*fluid_particles)[i].a.x = 0.0;
        (*fluid_particles)[i].a.y = 0.0;
        (*fluid_particles)[i].a.z = 0.0;
        (*fluid_particles)[i].v.x = 0.0;
        (*fluid_particles)[i].v.y = 0.0;
        (*fluid_particles)[i].v.z = 0.0;
        (*fluid_particles)[i].density = params->rest_density;
    }

    // Place particles inside bounding volume
    double x,y,z;
    int i = 0;
    for(z=water->min_z; z<=water->max_z; z+=spacing) {
        for(y=water->min_y; y<=water->max_y; y+=spacing) {
            for(x=water->min_x; x<=water->max_x; x+=spacing) {
                if(i < params->number_fluid_particles) {
                    (*fluid_particles)[i].pos.x = x;
                    (*fluid_particles)[i].pos.y = y;
                    (*fluid_particles)[i].pos.z = z;
                    i++;
                }
            }
        }
    }
    params->number_fluid_particles = i;

    // Construct bounding box
    constructBoundaryBox(*boundary_particles, boundary, params);
}

// Fill in simulation volumes and all derived parameters (mass, spacing,
// smoothing radius, particle counts, speed of sound, CFL check).
void initParams(AABB* water_volume, AABB* boundary_volume, param* params)
{
    // Boundary box
    boundary_volume->min_x = 0.0;
    boundary_volume->max_x = 1.1;
    boundary_volume->min_y = 0.0;
    boundary_volume->max_y = 1.1;
    boundary_volume->min_z = 0.0;
    boundary_volume->max_z = 1.1;

    // water volume
    water_volume->min_x = 0.1;
    water_volume->max_x = 0.5;
    water_volume->min_y = 0.1;
    water_volume->max_y = 0.5;
    water_volume->min_z = 0.08;
    water_volume->max_z = 0.8;

    // Simulation parameters
    params->number_fluid_particles = 2048;
    params->rest_density = 1000.0;
    params->g = 9.8;
    params->alpha = 0.02;
    params->surface_tension =  0.01;
    params->number_steps = 500; // reduce from 5000
    params->time_step = 0.00035;

    // Mass of each particle
    double volume = (water_volume->max_x - water_volume->min_x)
                  * (water_volume->max_y - water_volume->min_y)
                  * (water_volume->max_z - water_volume->min_z);
    params->mass_particle = params->rest_density * (volume/params->number_fluid_particles);

    // Cube calculated spacing
    params->spacing_particle = pow(volume/params->number_fluid_particles,1.0/3.0);

    // Smoothing radius, h
    params->smoothing_radius = params->spacing_particle;

    // Boundary particles
    int num_x = ceil((boundary_volume->max_x - boundary_volume->min_x)/params->spacing_particle);
    int num_y = ceil((boundary_volume->max_y - boundary_volume->min_y)/params->spacing_particle);
    int num_z = ceil((boundary_volume->max_z - boundary_volume->min_z)/params->spacing_particle);
    // NOTE(review): the third face term repeats 2*num_y*num_z; for a box the
    // xy faces would be 2*num_x*num_y. Numerically identical here because the
    // boundary volume is a cube (num_x == num_y == num_z) — confirm against
    // constructBoundaryBox before changing.
    int num_boundary_particles = (2 * num_x * num_z) + (2 * num_y * num_z) + (2* num_y * num_z);
    params->number_boundary_particles = num_boundary_particles;

    // Total number of particles
    params->number_particles = params->number_boundary_particles + params->number_fluid_particles;

    // Number of steps before frame needs to be written for 30 fps
    params->steps_per_frame = (int)(1.0/(params->time_step*30.0));

    // Calculate speed of sound for simulation
    double max_height = water_volume->max_y;
    double max_velocity = sqrt(2.0*params->g*max_height);
    params->speed_sound = max_velocity/sqrt(0.01);

    // Minimum stepsize from Courant-Friedrichs-Lewy condition
    double recomend_step = 0.4 * params->smoothing_radius
                         / (params->speed_sound * (1+ 0.6*params->alpha));
    printf("Using time step: %f, Minimum recomended %f\n",params->time_step, recomend_step);
}

// Release host-side particle arrays allocated by initParticles.
void finalizeParticles(fluid_particle *fluid_particles, boundary_particle *boundary_particles)
{
    free(fluid_particles);
    free(boundary_particles);
}

int main(int argc, char *argv[])
{
    param params;
    AABB water_volume;
    AABB boundary_volume;
    fluid_particle *fluid_particles = NULL;
    boundary_particle *boundary_particles = NULL;

    initParams(&water_volume, &boundary_volume, &params);
    initParticles(&fluid_particles, &boundary_particles, &water_volume, &boundary_volume, &params);
    eulerStart(fluid_particles, boundary_particles, &params);

    int num_fluid_particles = params.number_fluid_particles;
    int num_boundary_particles = params.number_boundary_particles;

    fluid_particle *d_fluid_particles;
    boundary_particle *d_boundary_particles;
    param *d_params;
    hipMalloc((void**)&d_fluid_particles, num_fluid_particles * sizeof(fluid_particle));
    hipMalloc((void**)&d_boundary_particles, num_boundary_particles * sizeof(boundary_particle));
    hipMalloc((void**)&d_params, sizeof(param));

    hipMemcpy(d_fluid_particles, fluid_particles,
              num_fluid_particles * sizeof(fluid_particle), hipMemcpyHostToDevice);
    hipMemcpy(d_boundary_particles, boundary_particles,
              num_boundary_particles * sizeof(boundary_particle), hipMemcpyHostToDevice);
    hipMemcpy(d_params, &params, sizeof(param), hipMemcpyHostToDevice);

    dim3 block1D(256);
    dim3 grid1D_FP((num_fluid_particles + 255)/256);

    // Main simulation loop
    for(int n=0; n<params.number_steps; n++) {
        hipLaunchKernelGGL(updatePressures, dim3(grid1D_FP), dim3(block1D), 0, 0,
                           d_fluid_particles, d_params);
        hipLaunchKernelGGL(updateAccelerationsFP, dim3(grid1D_FP), dim3(block1D), 0, 0,
                           d_fluid_particles, d_params);
        // Fix: updateAccelerationsBP is indexed over FLUID particles, so the
        // grid must cover num_fluid_particles. The original launched it with a
        // boundary-particle-sized grid, which only works when the boundary
        // count happens to exceed the fluid count.
        hipLaunchKernelGGL(updateAccelerationsBP, dim3(grid1D_FP), dim3(block1D), 0, 0,
                           d_fluid_particles, d_boundary_particles, d_params);
        hipLaunchKernelGGL(updatePositions, dim3(grid1D_FP), dim3(block1D), 0, 0,
                           d_fluid_particles, d_params);
        //hipDeviceSynchronize();
    }

    hipMemcpy(fluid_particles, d_fluid_particles,
              num_fluid_particles * sizeof(fluid_particle), hipMemcpyDeviceToHost);
    writeFile(fluid_particles, &params);

    finalizeParticles(fluid_particles, boundary_particles);
    hipFree(d_fluid_particles);
    hipFree(d_boundary_particles);
    hipFree(d_params);
    return 0;
}
the_stack
//#include "util.h"
#include "util.cuh"
#include "util_type.h"
#include "util_type_internal.h"
#include "util_func.h"
#include "update_ops_cuda.h"
#include "update_ops_cuda_device_functions.h"
#include <assert.h>

// 2x2 gate matrix staged in constant memory for kernels that read it there.
__constant__ GTYPE matrix_const_gpu[4];
// Sorted qubit indices used to rebuild full basis indices in the
// multi-controlled kernel. Capacity 30 — presumably the max supported qubit
// count; confirm against the rest of the library.
__constant__ UINT insert_index_list_gpu[30];

// Dispatch a single-qubit Pauli gate: 0 = I (no-op), 1 = X, 2 = Y, 3 = Z.
// Any other value prints an error and asserts.
__host__ void single_qubit_Pauli_gate_host(UINT target_qubit_index, UINT Pauli_operator_type, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	switch (Pauli_operator_type) {
	case 0:
		break;
	case 1:
		X_gate_host(target_qubit_index, state, dim, stream, device_number);
		break;
	case 2:
		Y_gate_host(target_qubit_index, state, dim, stream, device_number);
		break;
	case 3:
		Z_gate_host(target_qubit_index, state, dim, stream, device_number);
		break;
	default:
		fprintf(stderr, "invalid Pauli operation is called");
		assert(0);
	}
}

// Apply a single-qubit Pauli rotation: builds the 2x2 matrix
// cos(angle/2)*I + i*sin(angle/2)*P (componentwise via real/imag parts below)
// and applies it as a dense matrix gate.
__host__ void single_qubit_Pauli_rotation_gate_host(unsigned int target_qubit_index, unsigned int op_idx, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	// Row-major 2x2 Pauli matrices: I, X, Y, Z.
	CPPCTYPE PAULI_MATRIX[4][4] = {
		{ CPPCTYPE(1, 0), CPPCTYPE(0, 0), CPPCTYPE(0, 0), CPPCTYPE(1, 0) },
		{ CPPCTYPE(0, 0), CPPCTYPE(1, 0), CPPCTYPE(1, 0), CPPCTYPE(0, 0) },
		{ CPPCTYPE(0, 0), CPPCTYPE(0, -1), CPPCTYPE(0, 1), CPPCTYPE(0, 0) },
		{ CPPCTYPE(1, 0), CPPCTYPE(0, 0), CPPCTYPE(0, 0), CPPCTYPE(-1, 0) }
	};
	CPPCTYPE rotation_gate[4];
	rotation_gate[0] = CPPCTYPE(
		cos(angle / 2) - sin(angle / 2)* PAULI_MATRIX[op_idx][0].imag(),
		sin(angle / 2) * PAULI_MATRIX[op_idx][0].real()
	);
	rotation_gate[1] = CPPCTYPE(
		-sin(angle / 2)* PAULI_MATRIX[op_idx][1].imag(),
		sin(angle / 2) * PAULI_MATRIX[op_idx][1].real()
	);
	rotation_gate[2] = CPPCTYPE(
		-sin(angle / 2)* PAULI_MATRIX[op_idx][2].imag(),
		sin(angle / 2) * PAULI_MATRIX[op_idx][2].real()
	);
	rotation_gate[3] = CPPCTYPE(
		cos(angle / 2) - sin(angle / 2)* PAULI_MATRIX[op_idx][3].imag(),
		sin(angle / 2) * PAULI_MATRIX[op_idx][3].real()
	);
	single_qubit_dense_matrix_gate_host(target_qubit_index, rotation_gate, state_gpu, dim, stream, device_number);
	state = reinterpret_cast<void*>(state_gpu);
}

// Per-thread dense-gate update reading the 2x2 matrix from constant memory.
// Thread j handles one (basis0, basis1) amplitude pair differing only in the
// target qubit bit. NOTE(review): the launch path below passes the matrix as
// kernel arguments instead; this constant-memory variant appears unused in
// this chunk.
__device__ void single_qubit_dense_matrix_gate_device(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim){
	ITYPE basis0, basis1;
	ITYPE half_dim = dim >> 1;
	GTYPE tmp;
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < half_dim){
		// Insert a zero bit at the target position to form basis0, then flip
		// the target bit for basis1.
		basis0 = (j >> target_qubit_index);
		basis0 = basis0 << (target_qubit_index + 1);
		basis0 += j & ((1ULL << target_qubit_index) - 1);
		basis1 = basis0 ^ (1ULL << target_qubit_index);
		tmp = state_gpu[basis0];
		state_gpu[basis0] = cuCadd(cuCmul(matrix_const_gpu[0], tmp), cuCmul(matrix_const_gpu[1], state_gpu[basis1]));
		state_gpu[basis1] = cuCadd(cuCmul(matrix_const_gpu[2], tmp), cuCmul(matrix_const_gpu[3], state_gpu[basis1]));
	}
}

// Dense single-qubit gate kernel; the 2x2 matrix (mat0..mat3, row-major) is
// passed by value. One thread per amplitude pair.
__global__ void single_qubit_dense_matrix_gate_gpu(GTYPE mat0, GTYPE mat1, GTYPE mat2, GTYPE mat3, unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim){
	ITYPE basis0, basis1;
	ITYPE half_dim = dim >> 1;
	GTYPE tmp0, tmp1;
	ITYPE j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < half_dim){
		basis0 = (j >> target_qubit_index);
		basis0 = basis0 << (target_qubit_index + 1);
		basis0 += j & ((1ULL << target_qubit_index) - 1);
		basis1 = basis0 ^ (1ULL << target_qubit_index);
		tmp0 = state_gpu[basis0];
		tmp1 = state_gpu[basis1];
		state_gpu[basis0] = cuCadd(cuCmul(mat0, tmp0), cuCmul(mat1, tmp1));
		state_gpu[basis1] = cuCadd(cuCmul(mat2, tmp0), cuCmul(mat3, tmp1));
	}
}

// Host wrapper: converts the CPPCTYPE matrix to GTYPE (cuDoubleComplex),
// launches over dim/2 amplitude pairs on the given stream, then synchronizes
// and checks for errors. dim is assumed to be a power of two so grid*block
// covers loop_dim exactly.
__host__ void single_qubit_dense_matrix_gate_host(unsigned int target_qubit_index, const CPPCTYPE matrix[4], void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	ITYPE loop_dim = dim >> 1;
	unsigned int block = loop_dim <= 1024 ? loop_dim : 1024;
	unsigned int grid = loop_dim / block;
	GTYPE mat[4];
	for (int i = 0; i < 4; ++i) mat[i] = make_cuDoubleComplex(matrix[i].real(), matrix[i].imag());
	single_qubit_dense_matrix_gate_gpu << < grid, block, 0, *cuda_stream >> > (mat[0], mat[1], mat[2], mat[3], target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu);
}

// Diagonal gate: multiply each amplitude by matrix_const_gpu[bit], where bit
// is the target qubit's value in the basis index. One thread per amplitude.
__device__ void single_qubit_diagonal_matrix_gate_device(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	if(state_index<dim){
		state_gpu[state_index] = cuCmul(matrix_const_gpu[(state_index >> target_qubit_index) & 1], state_gpu[state_index]);
	}
}

__global__ void single_qubit_diagonal_matrix_gate_gpu(unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	single_qubit_diagonal_matrix_gate_device(target_qubit_index, state_gpu, dim);
}

// Host wrapper: stages the two diagonal entries into constant memory
// (async on the same stream, so ordering with the kernel is preserved),
// launches over all dim amplitudes, then synchronizes and checks errors.
__host__ void single_qubit_diagonal_matrix_gate_host(unsigned int target_qubit_index, const CPPCTYPE diagonal_matrix[2], void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, diagonal_matrix, sizeof(GTYPE) * 2, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__);
	unsigned int block = dim <= 1024 ? dim : 1024;
	unsigned int grid = dim / block;
	single_qubit_diagonal_matrix_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu);
}

// Controlled dense gate: each thread rebuilds a basis index with zeros at the
// control and target bit positions, sets the control bit to control_value,
// and applies the 2x2 matrix (from constant memory) to the target-bit pair.
__device__ void single_qubit_control_single_qubit_dense_matrix_gate_device(unsigned int control_qubit_index, unsigned int control_value, unsigned int target_qubit_index, GTYPE *state, ITYPE dim) {
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	const ITYPE loop_dim = dim>>2;

	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;

	// insert index: zero-insertion must go lowest bit first
	const unsigned int min_qubit_index = (control_qubit_index<target_qubit_index) ? control_qubit_index : target_qubit_index;
	const unsigned int max_qubit_index = (control_qubit_index>target_qubit_index) ? control_qubit_index : target_qubit_index;

	if(state_index<loop_dim){
		// create base index
		ITYPE basis_c_t0 = state_index;
		basis_c_t0 = insert_zero_to_basis_index_device(basis_c_t0, min_qubit_index);
		basis_c_t0 = insert_zero_to_basis_index_device(basis_c_t0, max_qubit_index);

		// flip control
		basis_c_t0 ^= control_mask;

		// gather index
		ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;

		// fetch values
		GTYPE cval_c_t0 = state[basis_c_t0];
		GTYPE cval_c_t1 = state[basis_c_t1];

		// set values
		state[basis_c_t0] = cuCadd(cuCmul(matrix_const_gpu[0], cval_c_t0), cuCmul(matrix_const_gpu[1], cval_c_t1));
		state[basis_c_t1] = cuCadd(cuCmul(matrix_const_gpu[2], cval_c_t0), cuCmul(matrix_const_gpu[3], cval_c_t1));
	}
}

__global__ void single_qubit_control_single_qubit_dense_matrix_gate_gpu(unsigned int control_qubit_index, unsigned int control_value, unsigned int target_qubit_index, GTYPE *state_gpu, ITYPE dim) {
	single_qubit_control_single_qubit_dense_matrix_gate_device(control_qubit_index, control_value, target_qubit_index, state_gpu, dim);
}

// Host wrapper: stages the 2x2 matrix into constant memory, launches over
// dim/4 basis groups (control bit + target pair fixed per thread), then
// synchronizes and checks errors.
__host__ void single_qubit_control_single_qubit_dense_matrix_gate_host(unsigned int control_qubit_index, unsigned int control_value, unsigned int target_qubit_index, const CPPCTYPE matrix[4], void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	cudaError cudaStatus;
	checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE) * 4, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__);
	ITYPE quad_dim = dim >> 2;
	unsigned int block = quad_dim <= 1024 ? quad_dim : 1024;
	unsigned int grid = quad_dim / block;
	single_qubit_control_single_qubit_dense_matrix_gate_gpu << <grid, block, 0, *cuda_stream >> > (control_qubit_index, control_value, target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu);
}

// Phase gate: multiply by `phase` every amplitude whose target qubit bit is 1.
__device__ void single_qubit_phase_gate_device(unsigned int target_qubit_index, GTYPE phase, GTYPE *state_gpu, ITYPE dim){
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	// target tmask
	const ITYPE mask = 1ULL << target_qubit_index;
	// loop varaibles
	const ITYPE loop_dim = dim>>1;
	if(state_index<loop_dim){
		// create index with target bit forced to 1
		ITYPE basis_1 = insert_zero_to_basis_index_device(state_index, target_qubit_index) ^ mask;
		// set values
		state_gpu[basis_1] = cuCmul(state_gpu[basis_1], phase);
	}
}

__global__ void single_qubit_phase_gate_gpu(unsigned int target_qubit_index, GTYPE phase, GTYPE *state_gpu, ITYPE dim){
	single_qubit_phase_gate_device(target_qubit_index, phase, state_gpu, dim);
}

// Host wrapper: converts the complex phase to GTYPE and launches over dim/2
// amplitudes (only those with the target bit set are touched).
__host__ void single_qubit_phase_gate_host(unsigned int target_qubit_index, CPPCTYPE phase, void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	GTYPE phase_gtype;
	cudaError cudaStatus;
	phase_gtype = make_cuDoubleComplex(phase.real(), phase.imag());
	ITYPE half_dim = dim >> 1;
	unsigned int block = half_dim <= 1024 ? half_dim : 1024;
	unsigned int grid = half_dim / block;
	single_qubit_phase_gate_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index, phase_gtype, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	cudaStatus = cudaGetLastError();
	checkCudaErrors(cudaStatus, __FILE__, __LINE__);
	state = reinterpret_cast<void*>(state_gpu);
}

// Multi-controlled dense single-qubit gate. Each thread reconstructs a full
// basis index by inserting zero bits at every control/target position (indices
// read from insert_index_list_gpu, set by the host wrapper), ORs in the
// control-value pattern via control_mask, and applies the 2x2 matrix from
// constant memory to the target-bit amplitude pair.
__global__ void multi_qubit_control_single_qubit_dense_matrix_gate(const ITYPE control_mask, UINT control_qubit_index_count, UINT target_qubit_index, GTYPE *state, ITYPE dim) {
	ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x;
	// insert index list
	const UINT insert_index_list_count = control_qubit_index_count + 1;
	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;
	const ITYPE loop_dim = dim >> insert_index_list_count;
	if(state_index<loop_dim){
		// create base index
		ITYPE basis_c_t0 = state_index;
		for(UINT cursor = 0 ; cursor < insert_index_list_count ; ++cursor){
			basis_c_t0 = insert_zero_to_basis_index_device(basis_c_t0, insert_index_list_gpu[cursor]);
		}
		// flip controls
		basis_c_t0 ^= control_mask;
		// gather target
		ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;
		// fetch values
		GTYPE cval_c_t0 = state[basis_c_t0];
		GTYPE cval_c_t1 = state[basis_c_t1];
		// set values
		state[basis_c_t0] = cuCadd( cuCmul( matrix_const_gpu[0], cval_c_t0), cuCmul( matrix_const_gpu[1], cval_c_t1));
		state[basis_c_t1] = cuCadd( cuCmul( matrix_const_gpu[2], cval_c_t0), cuCmul( matrix_const_gpu[3], cval_c_t1));
	}
}

// Host wrapper: builds the sorted (controls + target) index list and the
// control-value mask on the host, stages the matrix and index list into
// constant memory (synchronous copies — NOTE(review): these are not ordered
// on *cuda_stream; presumably safe because the copies block the host, but
// confirm), then launches over dim >> (controls + 1) basis groups.
__host__ void multi_qubit_control_single_qubit_dense_matrix_gate_host(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CPPCTYPE matrix[4], void* state, ITYPE dim, void* stream, unsigned int device_number) {
	int current_device = get_current_device();
	if (device_number != current_device) cudaSetDevice(device_number);
	GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
	cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream);
	// insert index list
	const UINT insert_index_list_count = control_qubit_index_count + 1;
	UINT* insert_index_list = create_sorted_ui_list_value_gsim(control_qubit_index_list, control_qubit_index_count, target_qubit_index);
	// control mask
	ITYPE control_mask = create_control_mask_gsim(control_qubit_index_list, control_value_list, control_qubit_index_count);
	// loop varaibles
	const ITYPE loop_dim = dim >> insert_index_list_count;
	unsigned int block = loop_dim <= 1024 ? loop_dim : 1024;
	unsigned int grid = loop_dim / block;
	checkCudaErrors(cudaMemcpyToSymbol(matrix_const_gpu, matrix, sizeof(GTYPE) * 4), __FILE__, __LINE__);
	checkCudaErrors(cudaMemcpyToSymbol(insert_index_list_gpu, insert_index_list, sizeof(UINT)*insert_index_list_count), __FILE__, __LINE__);
	multi_qubit_control_single_qubit_dense_matrix_gate << < grid, block, 0, *cuda_stream >> > (control_mask, control_qubit_index_count, target_qubit_index, state_gpu, dim);
	checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__);
	checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__);
	free(insert_index_list);
	state = reinterpret_cast<void*>(state_gpu);
}
the_stack
#include <raft/random/rng.cuh>
#include <rmm/exec_policy.hpp>

#include <tuple>

namespace cugraph {
namespace detail {

// Shuffles an edge list (and optional per-edge weights) across GPUs so that
// each edge lands on the GPU assigned to it by compute_gpu_id_from_edge_t over
// the 2D (row, col) communicator decomposition. Consumes the input vectors and
// returns the received (major, minor, weight) vectors.
template <typename vertex_t, typename weight_t>
std::tuple<rmm::device_uvector<vertex_t>,
           rmm::device_uvector<vertex_t>,
           std::optional<rmm::device_uvector<weight_t>>>
shuffle_edgelist_by_gpu_id(raft::handle_t const& handle,
                           rmm::device_uvector<vertex_t>&& d_edgelist_majors,
                           rmm::device_uvector<vertex_t>&& d_edgelist_minors,
                           std::optional<rmm::device_uvector<weight_t>>&& d_edgelist_weights)
{
  auto& comm           = handle.get_comms();
  auto const comm_size = comm.get_size();
  auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
  auto const row_comm_size = row_comm.get_size();
  auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
  auto const col_comm_size = col_comm.get_size();

  rmm::device_uvector<vertex_t> d_rx_edgelist_majors(0, handle.get_stream());
  rmm::device_uvector<vertex_t> d_rx_edgelist_minors(0, handle.get_stream());
  std::optional<rmm::device_uvector<weight_t>> d_rx_edgelist_weights{std::nullopt};
  // Two code paths: zip the weights into the shuffled tuple only when present.
  if (d_edgelist_weights) {
    auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(
      d_edgelist_majors.begin(), d_edgelist_minors.begin(), (*d_edgelist_weights).begin()));

    std::forward_as_tuple(
      std::tie(d_rx_edgelist_majors, d_rx_edgelist_minors, d_rx_edgelist_weights),
      std::ignore) =
      cugraph::groupby_gpuid_and_shuffle_values(
        comm,  // handle.get_comms(),
        edge_first,
        edge_first + d_edgelist_majors.size(),
        [key_func =
           cugraph::detail::compute_gpu_id_from_edge_t<vertex_t>{
             comm_size, row_comm_size, col_comm_size}] __device__(auto val) {
          return key_func(thrust::get<0>(val), thrust::get<1>(val));
        },
        handle.get_stream());
  } else {
    auto edge_first = thrust::make_zip_iterator(
      thrust::make_tuple(d_edgelist_majors.begin(), d_edgelist_minors.begin()));

    std::forward_as_tuple(std::tie(d_rx_edgelist_majors, d_rx_edgelist_minors), std::ignore) =
      cugraph::groupby_gpuid_and_shuffle_values(
        comm,  // handle.get_comms(),
        edge_first,
        edge_first + d_edgelist_majors.size(),
        [key_func =
           cugraph::detail::compute_gpu_id_from_edge_t<vertex_t>{
             comm_size, row_comm_size, col_comm_size}] __device__(auto val) {
          return key_func(thrust::get<0>(val), thrust::get<1>(val));
        },
        handle.get_stream());
  }

  return std::make_tuple(std::move(d_rx_edgelist_majors),
                         std::move(d_rx_edgelist_minors),
                         std::move(d_rx_edgelist_weights));
}

// Explicit instantiations for the supported (vertex_t, weight_t) combinations.
template std::tuple<rmm::device_uvector<int32_t>,
                    rmm::device_uvector<int32_t>,
                    std::optional<rmm::device_uvector<float>>>
shuffle_edgelist_by_gpu_id(raft::handle_t const& handle,
                           rmm::device_uvector<int32_t>&& d_edgelist_majors,
                           rmm::device_uvector<int32_t>&& d_edgelist_minors,
                           std::optional<rmm::device_uvector<float>>&& d_edgelist_weights);

template std::tuple<rmm::device_uvector<int32_t>,
                    rmm::device_uvector<int32_t>,
                    std::optional<rmm::device_uvector<double>>>
shuffle_edgelist_by_gpu_id(raft::handle_t const& handle,
                           rmm::device_uvector<int32_t>&& d_edgelist_majors,
                           rmm::device_uvector<int32_t>&& d_edgelist_minors,
                           std::optional<rmm::device_uvector<double>>&& d_edgelist_weights);

template std::tuple<rmm::device_uvector<int64_t>,
                    rmm::device_uvector<int64_t>,
                    std::optional<rmm::device_uvector<float>>>
shuffle_edgelist_by_gpu_id(raft::handle_t const& handle,
                           rmm::device_uvector<int64_t>&& d_edgelist_majors,
                           rmm::device_uvector<int64_t>&& d_edgelist_minors,
                           std::optional<rmm::device_uvector<float>>&& d_edgelist_weights);

template std::tuple<rmm::device_uvector<int64_t>,
                    rmm::device_uvector<int64_t>,
                    std::optional<rmm::device_uvector<double>>>
shuffle_edgelist_by_gpu_id(raft::handle_t const& handle,
                           rmm::device_uvector<int64_t>&& d_edgelist_majors,
                           rmm::device_uvector<int64_t>&& d_edgelist_minors,
                           std::optional<rmm::device_uvector<double>>&& d_edgelist_weights);

// Shuffles vertices across GPUs by compute_gpu_id_from_vertex_t. Consumes the
// input vector and returns the received vertices.
template <typename vertex_t>
rmm::device_uvector<vertex_t> shuffle_vertices_by_gpu_id(
  raft::handle_t const& handle, rmm::device_uvector<vertex_t>&& d_vertices)
{
  auto& comm           = handle.get_comms();
  auto const comm_size = comm.get_size();

  rmm::device_uvector<vertex_t> d_rx_vertices(0, handle.get_stream());
  std::tie(d_rx_vertices, std::ignore) = cugraph::groupby_gpuid_and_shuffle_values(
    comm,  // handle.get_comms(),
    d_vertices.begin(),
    d_vertices.end(),
    [key_func = cugraph::detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size}] __device__(
      auto val) { return key_func(val); },
    handle.get_stream());

  return d_rx_vertices;
}

template rmm::device_uvector<int32_t> shuffle_vertices_by_gpu_id(
  raft::handle_t const& handle, rmm::device_uvector<int32_t>&& d_vertices);

template rmm::device_uvector<int64_t> shuffle_vertices_by_gpu_id(
  raft::handle_t const& handle, rmm::device_uvector<int64_t>&& d_vertices);

// Groups edges (and optional weights) in place by local partition id and
// returns the per-group counts. When groupby_and_count_local_partition is
// true, the key additionally interleaves the minor vertex's GPU id within the
// row communicator (comm_size groups); otherwise the key is the local
// partition id alone (col_comm_size groups).
template <typename vertex_t, typename weight_t>
rmm::device_uvector<size_t> groupby_and_count_edgelist_by_local_partition_id(
  raft::handle_t const& handle,
  rmm::device_uvector<vertex_t>& d_edgelist_majors,
  rmm::device_uvector<vertex_t>& d_edgelist_minors,
  std::optional<rmm::device_uvector<weight_t>>& d_edgelist_weights,
  bool groupby_and_count_local_partition)
{
  auto& comm           = handle.get_comms();
  auto const comm_size = comm.get_size();
  auto const comm_rank = comm.get_rank();
  auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
  auto const row_comm_size = row_comm.get_size();
  auto const row_comm_rank = row_comm.get_rank();
  auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
  auto const col_comm_size = col_comm.get_size();
  auto const col_comm_rank = col_comm.get_rank();

  auto pair_first = thrust::make_zip_iterator(
    thrust::make_tuple(d_edgelist_majors.begin(), d_edgelist_minors.begin()));

  if (groupby_and_count_local_partition) {
    auto local_partition_id_gpu_id_pair_op =
      [comm_size,
       row_comm_size,
       partition_id_key_func =
         cugraph::detail::compute_partition_id_from_edge_t<vertex_t>{
           comm_size, row_comm_size, col_comm_size},
       gpu_id_key_func =
         cugraph::detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size}] __device__(auto pair) {
        auto local_partition_id =
          partition_id_key_func(thrust::get<0>(pair),
                                thrust::get<1>(pair)) /
          comm_size;  // global partition id to local partition id
        return local_partition_id * row_comm_size +
               (gpu_id_key_func(thrust::get<1>(pair)) % row_comm_size);
      };

    return d_edgelist_weights ? cugraph::groupby_and_count(pair_first,
                                                           pair_first + d_edgelist_majors.size(),
                                                           d_edgelist_weights->begin(),
                                                           local_partition_id_gpu_id_pair_op,
                                                           comm_size,
                                                           handle.get_stream())
                              : cugraph::groupby_and_count(pair_first,
                                                           pair_first + d_edgelist_majors.size(),
                                                           local_partition_id_gpu_id_pair_op,
                                                           comm_size,
                                                           handle.get_stream());
  } else {
    auto local_partition_id_op =
      [comm_size,
       key_func = cugraph::detail::compute_partition_id_from_edge_t<vertex_t>{
         comm_size, row_comm_size, col_comm_size}] __device__(auto pair) {
        return key_func(thrust::get<0>(pair), thrust::get<1>(pair)) /
               comm_size;  // global partition id to local partition id
      };

    return d_edgelist_weights ? cugraph::groupby_and_count(pair_first,
                                                           pair_first + d_edgelist_majors.size(),
                                                           d_edgelist_weights->begin(),
                                                           local_partition_id_op,
                                                           col_comm_size,
                                                           handle.get_stream())
                              : cugraph::groupby_and_count(pair_first,
                                                           pair_first + d_edgelist_majors.size(),
                                                           local_partition_id_op,
                                                           col_comm_size,
                                                           handle.get_stream());
  }
}

template rmm::device_uvector<size_t> groupby_and_count_edgelist_by_local_partition_id(
  raft::handle_t const& handle,
  rmm::device_uvector<int32_t>& d_edgelist_majors,
  rmm::device_uvector<int32_t>& d_edgelist_minors,
  std::optional<rmm::device_uvector<float>>& d_edgelist_weights,
  bool groupby_and_counts_local_partition);

template rmm::device_uvector<size_t> groupby_and_count_edgelist_by_local_partition_id(
  raft::handle_t const& handle,
  rmm::device_uvector<int32_t>& d_edgelist_majors,
  rmm::device_uvector<int32_t>& d_edgelist_minors,
  std::optional<rmm::device_uvector<double>>& d_edgelist_weights,
  bool groupby_and_counts_local_partition);

template rmm::device_uvector<size_t>
groupby_and_count_edgelist_by_local_partition_id( raft::handle_t const& handle, rmm::device_uvector<int64_t>& d_edgelist_majors, rmm::device_uvector<int64_t>& d_edgelist_minors, std::optional<rmm::device_uvector<float>>& d_edgelist_weights, bool groupby_and_counts_local_partition); template rmm::device_uvector<size_t> groupby_and_count_edgelist_by_local_partition_id( raft::handle_t const& handle, rmm::device_uvector<int64_t>& d_edgelist_majors, rmm::device_uvector<int64_t>& d_edgelist_minors, std::optional<rmm::device_uvector<double>>& d_edgelist_weights, bool groupby_and_counts_local_partition); } // namespace detail } // namespace cugraph
the_stack
// ROIAlign CUDA kernels (mmdetection-style PyTorch extension).
// NOTE(review): this translation unit relies on torch/ATen headers
// (at::Tensor, AT_DISPATCH_FLOATING_TYPES_AND_HALF, THCudaCheck) that are
// included outside this chunk — confirm against the full file.

// Grid-stride loop: correctness is independent of the launch configuration.
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

#define THREADS_PER_BLOCK 1024

// Number of blocks for N elements, capped to stay within grid limits.
// The cap is safe because all kernels here use CUDA_1D_KERNEL_LOOP.
inline int GET_BLOCKS(const int N) {
  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  int max_block_num = 65000;
  return min(optimal_block_num, max_block_num);
}

// Bilinearly interpolate bottom_data (a single H x W channel plane) at the
// continuous coordinate (y, x). Coordinates outside [-1, H] x [-1, W] return 0;
// coordinates in the one-pixel border are clamped to the edge.
// (Currently unused by the kernels below, which inline their own
// interpolation; kept for API compatibility.)
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }

  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;

  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }

  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;
  // do bilinear interpolation
  scalar_t lt = bottom_data[y_low * width + x_low];
  scalar_t rt = bottom_data[y_low * width + x_high];
  scalar_t lb = bottom_data[y_high * width + x_low];
  scalar_t rb = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
  return val;
}

// Forward ROIAlign: one output element per thread index.
//   bottom_data: input features, laid out (batch, channels, height, width).
//   bottom_rois: ROIs as rows of 5 values (batch_ind, x1, y1, x2, y2) in
//                input-image coordinates; scaled by spatial_scale here.
//   top_data:    output, (num_rois, channels, pooled_height, pooled_width).
// Each output cell samples one continuous point (h, w) inside the ROI and
// bilinearly interpolates it from the 2x2 neighborhood. sample_num is unused
// by this single-sample variant (kept for interface compatibility).
template <typename scalar_t>
__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                const scalar_t *bottom_rois,
                                const scalar_t spatial_scale,
                                const int sample_num, const int channels,
                                const int height, const int width,
                                const int pooled_height, const int pooled_width,
                                scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int n = index;
    int pw = n % pooled_width;
    n /= pooled_width;
    int ph = n % pooled_height;
    n /= pooled_height;
    int c = n % channels;
    n /= channels;

    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale;

    // Force malformed ROIs to be 1x1. The "+ 1." / "(pooled - 1.)" form makes
    // the first and last pooled cells land exactly on the ROI corners.
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w + 1., 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h + 1., 0.);
    scalar_t bin_size_h = roi_height / (pooled_height - 1.);
    scalar_t bin_size_w = roi_width / (pooled_width - 1.);

    // Continuous sampling point for this output cell.
    scalar_t h = (scalar_t)ph * bin_size_h + roi_start_h;
    scalar_t w = (scalar_t)pw * bin_size_w + roi_start_w;

    // Top-left integer corner of the 2x2 interpolation window, clamped so the
    // window (hstart..hstart+1, wstart..wstart+1) stays inside the image.
    int hstart = fminf(floor(h), height - 2);
    int wstart = fminf(floor(w), width - 2);
    int img_start = roi_batch_ind * channels * height * width;

    if (h < 0 || h >= height || w < 0 || w >= width) {
      // Sampling point fell outside the feature map: emit zero.
      top_data[index] = 0.;
    } else {
      scalar_t h_ratio = h - (scalar_t)hstart;
      scalar_t w_ratio = w - (scalar_t)wstart;
      int upleft = img_start + (c * height + hstart) * width + wstart;
      int upright = upleft + 1;
      int downleft = upleft + width;
      int downright = downleft + 1;

      top_data[index] =
          bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio) +
          bottom_data[upright] * (1. - h_ratio) * w_ratio +
          bottom_data[downleft] * h_ratio * (1. - w_ratio) +
          bottom_data[downright] * h_ratio * w_ratio;
    }
  }
}

// Host launcher for ROIAlignForward. Dispatches on the feature dtype
// (float/double/half) and launches on the default stream.
// Returns 1 on success (errors abort via THCudaCheck).
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                           const float spatial_scale, const int sample_num,
                           const int channels, const int height,
                           const int width, const int num_rois,
                           const int pooled_height, const int pooled_width,
                           at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.type(), "ROIAlignLaucherForward", ([&] {
        const scalar_t *bottom_data = features.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *top_data = output.data<scalar_t>();

        ROIAlignForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}

// Gradient weights and corner indices for bilinear interpolation at (y, x).
// Out-of-bounds points get all-zero weights and -1 indices.
// (Currently unused by the kernels below; kept for API compatibility.)
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }

  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  y_low = (int)y;
  x_low = (int)x;

  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }

  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly;
  scalar_t hx = 1. - lx;

  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}

// Backward ROIAlign: scatters each top_diff element to the four corners of
// its forward interpolation window with the matching bilinear weights.
// Uses atomicAdd because windows from different output cells overlap.
template <typename scalar_t>
__global__ void ROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int n = index;
    int pw = n % pooled_width;
    n /= pooled_width;
    int ph = n % pooled_height;
    n /= pooled_height;
    int c = n % channels;
    n /= channels;

    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale;

    // Force malformed ROIs to be 1x1 (mirrors ROIAlignForward exactly).
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w + 1., 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h + 1., 0.);
    scalar_t bin_size_h = roi_height / (pooled_height - 1.);
    scalar_t bin_size_w = roi_width / (pooled_width - 1.);

    scalar_t h = (scalar_t)ph * bin_size_h + roi_start_h;
    scalar_t w = (scalar_t)pw * bin_size_w + roi_start_w;

    int hstart = fminf(floor(h), height - 2);
    int wstart = fminf(floor(w), width - 2);
    int img_start = roi_batch_ind * channels * height * width;

    // Forward wrote 0 for out-of-range points, so no gradient flows there.
    if (!(h < 0 || h >= height || w < 0 || w >= width)) {
      scalar_t h_ratio = h - (scalar_t)(hstart);
      scalar_t w_ratio = w - (scalar_t)(wstart);
      int upleft = img_start + (c * height + hstart) * width + wstart;
      int upright = upleft + 1;
      int downleft = upleft + width;
      int downright = downleft + 1;

      atomicAdd(bottom_diff + upleft,
                top_diff[index] * (1. - h_ratio) * (1 - w_ratio));
      atomicAdd(bottom_diff + upright,
                top_diff[index] * (1. - h_ratio) * w_ratio);
      atomicAdd(bottom_diff + downleft,
                top_diff[index] * h_ratio * (1 - w_ratio));
      atomicAdd(bottom_diff + downright,
                top_diff[index] * h_ratio * w_ratio);
    }
  }
}

// Host launcher for ROIAlignBackward. double is rejected because
// atomicAdd(double*) is unavailable on the targeted architectures.
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                            const float spatial_scale, const int sample_num,
                            const int channels, const int height,
                            const int width, const int num_rois,
                            const int pooled_height, const int pooled_width,
                            at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.type(), "ROIAlignLaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }

        ROIAlignBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, bottom_diff);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}

// "Adaptive" forward ROIAlign: bin size is roi / pooled (no -1), the
// interpolation window spans stride_h x stride_w (= rounded bin size), and
// weights fall off linearly with distance over the stride.
template <typename scalar_t>
__global__ void ROIAlignAdaForward(const int nthreads,
                                   const scalar_t *bottom_data,
                                   const scalar_t *bottom_rois,
                                   const scalar_t spatial_scale,
                                   const int sample_num, const int channels,
                                   const int height, const int width,
                                   const int pooled_height,
                                   const int pooled_width,
                                   scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int n = index;
    int pw = n % pooled_width;
    n /= pooled_width;
    int ph = n % pooled_height;
    n /= pooled_height;
    int c = n % channels;
    n /= channels;

    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale;

    // Force malformed ROIs to be 1x1.
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w + 1., 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h + 1., 0.);
    scalar_t bin_size_h = roi_height / (scalar_t)pooled_height;
    scalar_t bin_size_w = roi_width / (scalar_t)pooled_width;
    // Sampling stride adapts to the bin size (at least one pixel).
    int stride_w = fmaxf(1, round(bin_size_w));
    int stride_h = fmaxf(1, round(bin_size_h));

    scalar_t h = (scalar_t)ph * bin_size_h + roi_start_h;
    scalar_t w = (scalar_t)pw * bin_size_w + roi_start_w;

    int hstart = fminf(floor((float)(ph) * bin_size_h + roi_start_h),
                       height - 2);
    // BUGFIX: wstart previously duplicated the hstart expression
    // (ph * bin_size_h + roi_start_h clamped by height); it must use the
    // horizontal coordinate, matching ROIAlignAdaBackward.
    int wstart = fminf(floor((float)(pw) * bin_size_w + roi_start_w),
                       width - 2);
    int img_start = roi_batch_ind * channels * height * width;

    if (h < 0 || h >= height || w < 0 || w >= width) {
      top_data[index] = 0.;
    } else {
      // BUGFIX: accumulate into a local register instead of "+=" on
      // top_data[index], so the result no longer depends on the output
      // buffer's prior contents. Identical when the buffer was pre-zeroed.
      scalar_t out_val = 0.;
      for (int hidx = 0; hidx <= stride_h; hidx += stride_h) {
        for (int widx = 0; widx <= stride_w; widx += stride_w) {
          if (((widx + wstart) >= 0) && ((widx + wstart) < width) &&
              ((hidx + hstart) >= 0) && ((hidx + hstart) < height)) {
            int cur_loc = img_start + (c * height + hstart) * width + wstart +
                          hidx * width + widx;
            scalar_t h_ratio =
                1. - (scalar_t)fabsf(h - hstart - hidx) / (scalar_t)stride_h;
            scalar_t w_ratio =
                1. - (scalar_t)fabsf(w - wstart - widx) / (scalar_t)stride_w;
            out_val += bottom_data[cur_loc] * h_ratio * w_ratio;
          }
        }
      }
      top_data[index] = out_val;
    }
  }
}

// Host launcher for ROIAlignAdaForward.
int ROIAlignAdaForwardLaucher(const at::Tensor features, const at::Tensor rois,
                              const float spatial_scale, const int sample_num,
                              const int channels, const int height,
                              const int width, const int num_rois,
                              const int pooled_height, const int pooled_width,
                              at::Tensor output) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.type(), "ROIAlignAdaLaucherForward", ([&] {
        const scalar_t *bottom_data = features.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *top_data = output.data<scalar_t>();

        // BUGFIX: this launcher previously launched ROIAlignForward, so the
        // adaptive kernel was never executed.
        ROIAlignAdaForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, top_data);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}

// Backward pass matching ROIAlignAdaForward: scatters gradients over the
// adaptive stride_h x stride_w window with the same linear-falloff weights.
template <typename scalar_t>
__global__ void ROIAlignAdaBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int n = index;
    int pw = n % pooled_width;
    n /= pooled_width;
    int ph = n % pooled_height;
    n /= pooled_height;
    int c = n % channels;
    n /= channels;

    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale;

    // Force malformed ROIs to be 1x1 (mirrors ROIAlignAdaForward).
    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w + 1., 0.);
    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h + 1., 0.);
    scalar_t bin_size_h = roi_height / (scalar_t)pooled_height;
    scalar_t bin_size_w = roi_width / (scalar_t)pooled_width;
    int stride_w = fmaxf(1, round(bin_size_w));
    int stride_h = fmaxf(1, round(bin_size_h));

    scalar_t h = (scalar_t)ph * bin_size_h + roi_start_h;
    scalar_t w = (scalar_t)pw * bin_size_w + roi_start_w;

    int hstart = fminf(floor((float)(ph) * bin_size_h + roi_start_h),
                       height - 2);
    int wstart = fminf(floor((float)(pw) * bin_size_w + roi_start_w),
                       width - 2);
    int img_start = roi_batch_ind * channels * height * width;

    if (!(h < 0 || h >= height || w < 0 || w >= width)) {
      for (int hidx = 0; hidx <= stride_h; hidx += stride_h) {
        for (int widx = 0; widx <= stride_w; widx += stride_w) {
          if (((hstart + hidx) >= 0) && ((hstart + hidx) < height) &&
              ((wstart + widx) >= 0) && ((wstart + widx) < width)) {
            int cur_loc = img_start + (c * height + hstart) * width + wstart +
                          hidx * width + widx;
            scalar_t h_ratio =
                1. - (scalar_t)fabsf(h - hstart - hidx) / (scalar_t)(stride_h);
            scalar_t w_ratio =
                1. - (scalar_t)fabsf(w - wstart - widx) / (scalar_t)(stride_w);
            atomicAdd(bottom_diff + cur_loc,
                      top_diff[index] * h_ratio * w_ratio);
          }
        }
      }
    }
  }
}

// Host launcher for ROIAlignAdaBackward. double rejected (no atomicAdd).
int ROIAlignAdaBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                               const float spatial_scale, const int sample_num,
                               const int channels, const int height,
                               const int width, const int num_rois,
                               const int pooled_height, const int pooled_width,
                               at::Tensor bottom_grad) {
  const int output_size = num_rois * pooled_height * pooled_width * channels;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.type(), "ROIAlignAdaLaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }

        // BUGFIX: this launcher previously launched ROIAlignBackward, so the
        // adaptive gradient kernel was never executed (forward/backward
        // mismatch would silently corrupt training gradients).
        ROIAlignAdaBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, bottom_diff);
      }));
  THCudaCheck(cudaGetLastError());
  return 1;
}
the_stack
/** * Copyright (c) 2021 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/device_helper.h" #include "saiga/cuda/imageProcessing/imageProcessing.h" #include "saiga/cuda/shfl_helper.h" namespace Saiga { namespace CUDA { //todo maybe change static __constant__ float d_Kernel[SAIGA_MAX_KERNEL_SIZE]; template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS> __global__ static void d_convolveOuterLinear(ImageView<T> src, ImageView<T> dst) { const unsigned BLOCK_H2 = BLOCK_H * Y_ELEMENTS; // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840 __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS]; // for radius = 4: elements = (32+8) * (16) = 640 = 2560 __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS]; // total s mem per block = 6400 // with 512 threads per block smem per sm: 25600 -> 100% occ int tx = threadIdx.x; int ty = threadIdx.y; int t = tx + ty * BLOCK_W; int xp = blockIdx.x * BLOCK_W + tx; int yp = blockIdx.y * BLOCK_H2 + ty; int blockStartX = blockIdx.x * BLOCK_W - RADIUS; int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS; const int blockSizeX = BLOCK_W + 2 * RADIUS; const int blockSizeY = BLOCK_H2 + 2 * RADIUS; // fill buffer for (int i = t; i < blockSizeX * blockSizeY; i += (BLOCK_W * BLOCK_H)) { int x = i % blockSizeX; int y = i / blockSizeX; int gx = x + blockStartX; int gy = y + blockStartY; src.clampToEdge(gy, gx); buffer[y][x] = src(gy, gx); } __syncthreads(); T* kernel = d_Kernel; for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H)) { int x = i % blockSizeX; int y = i / blockSizeX; T sum = 0; #pragma unroll for (int j = -RADIUS; j <= RADIUS; j++) { int kernelIndex = j + RADIUS; sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex]; } buffer2[y][x] = sum; } __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) { T sum = 0; #pragma unroll for (int j = -RADIUS; j <= RADIUS; j++) { int kernelIndex = j + RADIUS; sum += 
buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex]; } if (dst.inImage(yp, xp)) dst(yp, xp) = sum; yp += BLOCK_H; ty += BLOCK_H; } } template <typename T, int RADIUS> inline void convolveOuterLinear(ImageView<T> src, ImageView<T> dst) { int w = src.width; int h = src.height; const int BLOCK_W = 32; const int BLOCK_H = 16; const int Y_ELEMENTS = 2; dim3 blocks(Saiga::iDivUp(w, BLOCK_W), Saiga::iDivUp(h, BLOCK_H * Y_ELEMENTS), 1); // dim3 blocks(Saiga::CUDA::getBlockCount(w, BLOCK_W), Saiga::CUDA::getBlockCount(h, BLOCK_H)); dim3 threads(BLOCK_W, BLOCK_H); d_convolveOuterLinear<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst); } template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS> __global__ static void d_convolveOuterHalo(ImageView<T> src, ImageView<T> dst) { const unsigned int BLOCK_H2 = BLOCK_H * Y_ELEMENTS; const unsigned int WARPS_PER_BLOCK = BLOCK_W * BLOCK_H / 32; // 16 static_assert(WARPS_PER_BLOCK == 16, "warps per block wrong"); // for radius = 4: elements = (32+8) * (16+8) = 960 = 3840 __shared__ T buffer[BLOCK_H2 + 2 * RADIUS][BLOCK_W + 2 * RADIUS]; // for radius = 4: elements = (32+8) * (16) = 640 = 2560 __shared__ T buffer2[BLOCK_H2][BLOCK_W + 2 * RADIUS]; // total s mem per block = 6400 // with 512 threads per block smem per sm: 25600 -> 100% occ int tx = threadIdx.x; int ty = threadIdx.y; int t = tx + ty * BLOCK_W; const unsigned int warp_lane = t / 32; const unsigned int lane_id = t & 31; int xp = blockIdx.x * BLOCK_W + tx; int yp = blockIdx.y * BLOCK_H2 + ty; int x = xp; int y = yp; const unsigned int x_tile = blockIdx.x * BLOCK_W; // const unsigned int y_tile = blockIdx.y * BLOCK_H2; int blockStartX = blockIdx.x * BLOCK_W - RADIUS; int blockStartY = blockIdx.y * BLOCK_H2 - RADIUS; const int blockSizeX = BLOCK_W + 2 * RADIUS; // const int blockSizeY = BLOCK_H2 + 2*RADIUS; // copy main data for (int i = 0; i < Y_ELEMENTS; ++i) { buffer[ty + i * BLOCK_H + RADIUS][tx + RADIUS] = 
src.clampedRead(y + i * BLOCK_H, x); } // top and bottom halo if (warp_lane < 4) { const unsigned int num_warps = 4; for (int i = warp_lane; i < RADIUS; i += num_warps) { buffer[i][lane_id + RADIUS] = src.clampedRead(blockStartY + i, x_tile + lane_id); buffer[BLOCK_H2 + RADIUS + i][lane_id + RADIUS] = src.clampedRead(blockStartY + BLOCK_H2 + RADIUS + i, x_tile + lane_id); } } const unsigned int side_halo_rows_per_warp = 32 / RADIUS; int local_warp_id = lane_id / RADIUS; int local_lane_id = lane_id % RADIUS; // left halo if (warp_lane >= 4 && warp_lane < 10) { const unsigned int num_warps = 6; int wid = warp_lane - 4; int rows = BLOCK_H2 + 2 * RADIUS; for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp) { if (local_warp_id < side_halo_rows_per_warp) { buffer[i][local_lane_id] = src.clampedRead(blockStartY + i, blockStartX + local_lane_id); } } } // right halo if (warp_lane >= 10 && warp_lane < 16) { const unsigned int num_warps = 6; int wid = warp_lane - 10; int rows = BLOCK_H2 + 2 * RADIUS; for (int i = wid * side_halo_rows_per_warp + local_warp_id; i < rows; i += num_warps * side_halo_rows_per_warp) { if (local_warp_id < side_halo_rows_per_warp) { buffer[i][local_lane_id + RADIUS + BLOCK_W] = src.clampedRead(blockStartY + i, blockStartX + local_lane_id + RADIUS + BLOCK_W); } } } __syncthreads(); T* kernel = d_Kernel; for (int i = t; i < blockSizeX * BLOCK_H2; i += (BLOCK_W * BLOCK_H)) { int x = i % blockSizeX; int y = i / blockSizeX; T sum = 0; #pragma unroll for (int j = -RADIUS; j <= RADIUS; j++) { int kernelIndex = j + RADIUS; sum += buffer[y + RADIUS + j][x] * kernel[kernelIndex]; } buffer2[y][x] = sum; } __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) { T sum = 0; #pragma unroll for (int j = -RADIUS; j <= RADIUS; j++) { int kernelIndex = j + RADIUS; sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex]; } if (dst.inImage(yp, xp)) dst(yp, xp) = sum; yp += BLOCK_H; ty += BLOCK_H; } } template 
<typename T, int RADIUS> inline void convolveOuterHalo(ImageView<T> src, ImageView<T> dst) { int w = src.width; int h = src.height; const int BLOCK_W = 32; const int BLOCK_H = 16; const int Y_ELEMENTS = 2; dim3 blocks(Saiga::iDivUp(w, BLOCK_W), Saiga::iDivUp(h, BLOCK_H * Y_ELEMENTS), 1); // dim3 blocks(Saiga::CUDA::getBlockCount(w, BLOCK_W), Saiga::CUDA::getBlockCount(h, BLOCK_H)); dim3 threads(BLOCK_W, BLOCK_H); d_convolveOuterHalo<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst); } template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS> __global__ static void d_convolveInner(ImageView<T> src, ImageView<T> dst) { const unsigned int TILE_H = BLOCK_H; const unsigned int TILE_W = BLOCK_W; const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS; const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // int t = tx + ty * BLOCK_W; int x_tile = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS; int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS; int x = x_tile + tx; int y = y_tile + ty; __shared__ T buffer[TILE_H2][TILE_W]; __shared__ T buffer2[TILE_H2 - RADIUS * 2][TILE_W]; // copy main data for (int i = 0; i < Y_ELEMENTS; ++i) buffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x); __syncthreads(); T* kernel = d_Kernel; // convolve along y axis // if(ty > RADIUS && ty < TILE_H2 - RADIUS) // { // int oy = ty - RADIUS; for (int i = 0; i < Y_ELEMENTS; ++i) { // int gx = x; // int gy = y + i * TILE_H; int lx = tx; int ly = ty + i * TILE_H; if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue; T sum = 0; #pragma unroll for (int j = -RADIUS; j <= RADIUS; j++) { int kernelIndex = j + RADIUS; sum += buffer[ly + j][lx] * kernel[kernelIndex]; } buffer2[ly - RADIUS][lx] = sum; } __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) { int gx = x; int gy = y + i * TILE_H; int lx = tx; int ly = ty + i * TILE_H; if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue; if (lx < RADIUS || lx >= 
TILE_W - RADIUS) continue;
        T sum = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            sum += buffer2[ly - RADIUS][lx + j] * kernel[kernelIndex];
        }
        // if(dst.inImage(gx,gy))
        // dst(g,yp) = sum;
        dst.clampedWrite(gy, gx, sum);
    }
#if 0
    for(int i =0; i < Y_ELEMENTS; ++i){
        T sum = 0;
# pragma unroll
        for (int j=-RADIUS;j<=RADIUS;j++){
            int kernelIndex = j + RADIUS;
            sum += buffer2[ty][tx + RADIUS + j] * kernel[kernelIndex];
        }
        if(dst.inImage(xp,yp)) dst(xp,yp) = sum;
        yp += BLOCK_H;
        ty += BLOCK_H;
    }
#endif
}

// Host launcher for d_convolveInner: tiles the image so that each block
// computes a (BLOCK_W - 2*RADIUS) x (BLOCK_H*Y_ELEMENTS - 2*RADIUS) output
// region (the halo of RADIUS pixels on each side is loaded but not written).
// LOW_OCC selects a wider/flatter tile with more rows per thread.
template <typename T, int RADIUS, bool LOW_OCC = false>
inline void convolveInner(ImageView<T> src, ImageView<T> dst)
{
    int w = src.width;
    int h = src.height;
    const int BLOCK_W    = LOW_OCC ? 64 : 32;
    const int BLOCK_H    = LOW_OCC ? 8 : 16;
    const int Y_ELEMENTS = LOW_OCC ? 4 : 2;
    dim3 blocks(Saiga::iDivUp(w, BLOCK_W - 2 * RADIUS), Saiga::iDivUp(h, BLOCK_H * Y_ELEMENTS - 2 * RADIUS), 1);
    // dim3 blocks(Saiga::CUDA::getBlockCount(w, BLOCK_W), Saiga::CUDA::getBlockCount(h, BLOCK_H));
    dim3 threads(BLOCK_W, BLOCK_H);
    d_convolveInner<T, RADIUS, BLOCK_W, BLOCK_H, Y_ELEMENTS><<<blocks, threads>>>(src, dst);
}

// Separable 2D convolution kernel using warp shuffles for the row pass.
// Each thread reads one pixel per Y_ELEMENTS row; the horizontal neighbors
// needed for the row convolution are obtained via shfl() from lane neighbors
// instead of shared memory. The row results go through shared memory
// (buffer2) for the column pass. Halo threads (within RADIUS of the tile
// border) do not produce output.
// NOTE(review): shfl() here appears to be a project wrapper around
// __shfl_sync; lanes at the warp boundary read clamped/undefined neighbor
// values, which is why only 'inner' lanes store results — confirm wrapper
// semantics.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int Y_ELEMENTS>
__global__ static void d_convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    unsigned int lane_id = threadIdx.x % 32;
    // tile origin in the image; shifted left/up by RADIUS so the halo is covered
    int x_tile = blockIdx.x * (TILE_W - 2 * RADIUS) - RADIUS;
    int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    int x = x_tile + tx;
    int y = y_tile + ty;
    // __shared__ T buffer[TILE_H2][TILE_W];
    // __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2 + 1];
    // intermediate row-convolved values (halo columns stripped)
    __shared__ T buffer2[TILE_H2][TILE_W - RADIUS * 2];
    // __shared__ T buffer2[TILE_W - RADIUS * 2][TILE_H2];
    T localElements[Y_ELEMENTS];
    // load one pixel per row block (clamped at the image border)
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        localElements[i] = src.clampedRead(y + i * TILE_H, x);
    }
    // conv row
    T* kernel = d_Kernel;
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int lx = tx;
        int ly = ty + i * TILE_H;
        T sum  = 0;
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            // fetch the neighbor's pixel from its register via warp shuffle
            auto value = shfl(localElements[i], lane_id + j);
            sum += value * kernel[kernelIndex];
        }
        // only 'inner' lanes have valid sums (their neighbors are in-warp)
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        buffer2[ly][lx - RADIUS] = sum;
        // buffer2[lx- RADIUS][ly] = sum;
    }
    __syncthreads();
    // conv col
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int gx = x;
        int gy = y + i * TILE_H;
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        if (lx < RADIUS || lx >= TILE_W - RADIUS) continue;
        T sum = 0;
#if 1
# pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            int kernelIndex = j + RADIUS;
            auto value      = buffer2[ly + j][lx - RADIUS];
            // auto value = buffer2[lx - RADIUS][ly + j];
            sum += value * kernel[kernelIndex];
        }
#endif
        dst.clampedWrite(gy, gx, sum);
    }
}

// | ---- BLOCK_W * X_ELEMENTS * vectorSize ---- |
// [ x x x x x x x x x x x x x x x x x x x x x x ]
//
// Vectorized variant: each thread owns X_ELEMENTS consecutive pixels loaded
// as one VectorType (e.g. int2/int4) so the single global read is wide.
template <typename T, int RADIUS, unsigned int BLOCK_W, unsigned int BLOCK_H, unsigned int X_ELEMENTS,
          unsigned int Y_ELEMENTS, typename VectorType = int2>
//__launch_bounds__(BLOCK_W* BLOCK_H, 3)
__global__ static void d_convolveInnerShuffle2(ImageView<T> src, ImageView<T> dst)
{
    const unsigned int TILE_W  = BLOCK_W;
    const unsigned int TILE_H  = BLOCK_H;
    const unsigned int TILE_W2 = TILE_W * X_ELEMENTS;
    const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS;
    const unsigned int tx      = threadIdx.x;
    const unsigned int ty      = threadIdx.y;
    // int t = tx + ty * BLOCK_W;
    // static_assert( sizeof(VectorType) / sizeof(T) == X_ELEMENTS);
    unsigned int lane_id = threadIdx.x % 32;
    // start position of tile
    int x_tile = blockIdx.x * (TILE_W2 - 2 * RADIUS) - RADIUS;
    int y_tile = blockIdx.y * (TILE_H2 - 2 * RADIUS) - RADIUS;
    // global position of thread
    int x = x_tile + tx * X_ELEMENTS;
    int y = y_tile + ty;
    T* kernel = d_Kernel;
    // for vec4 radius 8:
    // (16 * Y_ELEMENTS) * (32 - 4) * 16
    // Y 3 -> 21504 100 occ
    // Y 4 -> 28672 75 occ
    // Y 5 -> 35840 50 occ
    // Y 6 -> 43008 50 occ
    // Y 8 -> 57344 failed
    __shared__ VectorType buffer2[TILE_H2][TILE_W - 2 * RADIUS / X_ELEMENTS];
    // own element + left and right radius
    VectorType localElements[Y_ELEMENTS][1 + 2 * RADIUS / X_ELEMENTS];  // 5
    // without this unroll we get a strange compile error
#pragma unroll
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        // clamp the read position to the image (replicate-border handling)
        int rowId = y + i * TILE_H;
        rowId     = std::min(rowId, src.height - 1);
        rowId     = std::max(0, rowId);
        int colId = std::max(0, x);
        int xb    = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
        colId     = std::min(colId, xb);
        T* row = src.rowPtr(rowId);
        CUDA_ASSERT(size_t(row) % sizeof(VectorType) == 0);
        T* elem = row + colId;
        // if(rowId == 0)
        // printf("%d \n",colId);
        T* localElementsT = reinterpret_cast<T*>(localElements[i]);
        // center of localElements
        // the left and right of the center will be filled by shuffles
        VectorType& myValue = localElements[i][RADIUS / X_ELEMENTS];  //[i][2]
        // load own value from global memory (note: this is the only global memory read)
        CUDA_ASSERT(size_t(elem) % sizeof(VectorType) == 0);
        myValue = reinterpret_cast<VectorType*>(elem)[0];
        // replicate the border pixel into all X_ELEMENTS slots when clamped
        if (x < 0)
        {
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                localElementsT[RADIUS + k] = localElementsT[RADIUS];
            }
        }
        if (x >= src.width)
        {
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                localElementsT[RADIUS + k] = localElementsT[RADIUS + X_ELEMENTS - 1];
            }
        }
        // shuffle left
        for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
        {
            localElements[i][j] = shfl(myValue, lane_id + j - RADIUS / X_ELEMENTS);
        }
        // shuffle right
        for (int j = 0; j < RADIUS / X_ELEMENTS; ++j)
        {
            localElements[i][j + RADIUS / X_ELEMENTS + 1] = shfl(myValue, lane_id + j + 1);
        }
        // row convolution: X_ELEMENTS outputs per thread
        T sum[X_ELEMENTS];
#pragma unroll
        for (int j = 0; j < X_ELEMENTS; ++j)
        {
            sum[j] = 0;
        }
#pragma unroll
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            T kernelValue = kernel[j + RADIUS];
#pragma unroll
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                sum[k] += localElementsT[RADIUS + j + k] * kernelValue;
            }
        }
        // write to shared memory if this thread is 'inner' (not in the halo)
        int lx = tx;
        int ly = ty + i * TILE_H;
        // continue if this thread is not a 'inner thread'
        if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
        if (x >= src.width) continue;
        // if(lx >= RADIUS / X_ELEMENTS && lx < TILE_W - RADIUS / X_ELEMENTS)
        {
            // NOTE(review): leftover debug scaffolding (colId == 508 probe);
            // the empty loop below only carried printf calls — dead code.
            if (rowId <= RADIUS && colId == 508)
            {
                // printf("sum row %d %d %f \n",x,y,sum[0]);
                if (y == 1)
                    for (int k = 0; k < X_ELEMENTS + 2 * RADIUS; ++k)
                    {
                        // printf("localElementsT %d %f \n",k,localElementsT[k]);
                    }
            }
            buffer2[ly][lx - RADIUS / X_ELEMENTS] = reinterpret_cast<VectorType*>(sum)[0];
        }
        // return;
    }
    // the only sync in this kernel
    __syncthreads();
    // column pass over the row-convolved shared buffer
#pragma unroll
    for (int i = 0; i < Y_ELEMENTS; ++i)
    {
        int rowId = y + i * TILE_H;
        rowId     = std::min(rowId, src.height - 1);
        rowId     = std::max(0, rowId);
        int colId = std::max(0, x);
        int xb    = Saiga::iAlignUp(src.width, X_ELEMENTS) - X_ELEMENTS;
        colId     = std::min(colId, xb);
        // colId = std::min(colId,src.width - X_ELEMENTS);
        // continue if this thread is not a 'inner thread'
        int lx = tx;
        int ly = ty + i * TILE_H;
        if (lx < RADIUS / X_ELEMENTS || lx >= TILE_W - RADIUS / X_ELEMENTS) continue;
        if (ly < RADIUS || ly >= TILE_H2 - RADIUS) continue;
        // continue if this thread is not in image
        if (x >= src.width || y + i * TILE_H >= src.height) continue;
        T* row  = dst.rowPtr(rowId);
        T* elem = row + colId;
        T sum[X_ELEMENTS];
        for (int j = 0; j < X_ELEMENTS; ++j)
        {
            sum[j] = 0;
        }
        // simple row convolution in shared memory
        for (int j = -RADIUS; j <= RADIUS; j++)
        {
            T kernelValue     = kernel[j + RADIUS];
            VectorType valueV = buffer2[ly + j][lx - RADIUS / X_ELEMENTS];
            for (int k = 0; k < X_ELEMENTS; ++k)
            {
                auto v = reinterpret_cast<T*>(&valueV)[k];
                sum[k] += v * kernelValue;
            }
        }
        // single vectorized store of the X_ELEMENTS results
        reinterpret_cast<VectorType*>(elem)[0] = reinterpret_cast<VectorType*>(sum)[0];
    }
}
template <typename T, int
RADIUS, typename VectorType = int>
inline void convolveInnerShuffle(ImageView<T> src, ImageView<T> dst)
{
    // Launch configuration for the vectorized shuffle convolution.
    // Each block covers a (BLOCK_W * X_ELEMENTS - 2R) x (BLOCK_H * Y_ELEMENTS - 2R)
    // output tile; X_ELEMENTS pixels are packed into one VectorType per thread.
    const int BLOCK_W    = 32;
    const int BLOCK_H    = 16;
    const int X_ELEMENTS = sizeof(VectorType) / sizeof(T);
    const int Y_ELEMENTS = 4;

    int width  = src.width;
    int height = src.height;

    dim3 blocks(Saiga::iDivUp(width, BLOCK_W * X_ELEMENTS - 2 * RADIUS),
                Saiga::iDivUp(height, BLOCK_H * Y_ELEMENTS - 2 * RADIUS), 1);
    dim3 threads(BLOCK_W, BLOCK_H);

    auto kernel = d_convolveInnerShuffle2<T, RADIUS, BLOCK_W, BLOCK_H, X_ELEMENTS, Y_ELEMENTS, VectorType>;

    // Pick the shared memory bank width that matches the vector element size.
    if (sizeof(VectorType) >= 8)
        cudaFuncSetSharedMemConfig(kernel, cudaSharedMemBankSizeEightByte);
    else
        cudaFuncSetSharedMemConfig(kernel, cudaSharedMemBankSizeFourByte);

    kernel<<<blocks, threads>>>(src, dst);
    CUDA_SYNC_CHECK_ERROR();
}

// Uploads the filter kernel to constant memory and dispatches the
// outer-linear variant for the requested runtime radius.
void convolveSinglePassSeparateOuterLinear(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                           int radius)
{
    CHECK_CUDA_ERROR(
        cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveOuterLinear<float, 1>(src, dst); break;
        case 2: CUDA::convolveOuterLinear<float, 2>(src, dst); break;
        case 3: CUDA::convolveOuterLinear<float, 3>(src, dst); break;
        case 4: CUDA::convolveOuterLinear<float, 4>(src, dst); break;
        case 5: CUDA::convolveOuterLinear<float, 5>(src, dst); break;
        case 6: CUDA::convolveOuterLinear<float, 6>(src, dst); break;
        case 7: CUDA::convolveOuterLinear<float, 7>(src, dst); break;
        case 8: CUDA::convolveOuterLinear<float, 8>(src, dst); break;
        case 9: CUDA::convolveOuterLinear<float, 9>(src, dst); break;
    }
}

// Same dispatch pattern for the halo variant (radii 1..8).
void convolveSinglePassSeparateOuterHalo(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                         int radius)
{
    CHECK_CUDA_ERROR(
        cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveOuterHalo<float, 1>(src, dst); break;
        case 2: CUDA::convolveOuterHalo<float, 2>(src, dst); break;
        case 3: CUDA::convolveOuterHalo<float, 3>(src, dst); break;
        case 4: CUDA::convolveOuterHalo<float, 4>(src, dst); break;
        case 5: CUDA::convolveOuterHalo<float, 5>(src, dst); break;
        case 6: CUDA::convolveOuterHalo<float, 6>(src, dst); break;
        case 7: CUDA::convolveOuterHalo<float, 7>(src, dst); break;
        case 8: CUDA::convolveOuterHalo<float, 8>(src, dst); break;
    }
}

// Inner-tile variant, default occupancy configuration.
void convolveSinglePassSeparateInner(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                     int radius)
{
    CHECK_CUDA_ERROR(
        cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveInner<float, 1>(src, dst); break;
        case 2: CUDA::convolveInner<float, 2>(src, dst); break;
        case 3: CUDA::convolveInner<float, 3>(src, dst); break;
        case 4: CUDA::convolveInner<float, 4>(src, dst); break;
        case 5: CUDA::convolveInner<float, 5>(src, dst); break;
        case 6: CUDA::convolveInner<float, 6>(src, dst); break;
        case 7: CUDA::convolveInner<float, 7>(src, dst); break;
        case 8: CUDA::convolveInner<float, 8>(src, dst); break;
    }
}

// Inner-tile variant with the low-occupancy (LOW_OCC = true) configuration.
void convolveSinglePassSeparateInner75(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                       int radius)
{
    CHECK_CUDA_ERROR(
        cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
    switch (radius)
    {
        case 1: CUDA::convolveInner<float, 1, true>(src, dst); break;
        case 2: CUDA::convolveInner<float, 2, true>(src, dst); break;
        case 3: CUDA::convolveInner<float, 3, true>(src, dst); break;
        case 4: CUDA::convolveInner<float, 4, true>(src, dst); break;
        case 5: CUDA::convolveInner<float, 5, true>(src, dst); break;
        case 6: CUDA::convolveInner<float, 6, true>(src, dst); break;
        case 7: CUDA::convolveInner<float, 7, true>(src, dst); break;
        case 8: CUDA::convolveInner<float, 8, true>(src, dst); break;
    }
}

// Shuffle variant. The vector type per radius is chosen so that
// RADIUS is divisible by the number of packed elements
// (int -> 1, int2 -> 2, int4 -> 4); unsupported radii fall through.
void convolveSinglePassSeparateInnerShuffle(ImageView<float> src, ImageView<float> dst, Saiga::ArrayView<float> kernel,
                                            int radius)
{
    CHECK_CUDA_ERROR(
        cudaMemcpyToSymbol(d_Kernel, kernel.data(), kernel.size() * sizeof(float), 0, cudaMemcpyDeviceToDevice));
    switch (radius)
    {
        case 0: CUDA::convolveInnerShuffle<float, 0, int>(src, dst); break;
        case 1: CUDA::convolveInnerShuffle<float, 1, int>(src, dst); break;
        case 2: CUDA::convolveInnerShuffle<float, 2, int2>(src, dst); break;
        case 3: CUDA::convolveInnerShuffle<float, 3, int>(src, dst); break;
        case 4: CUDA::convolveInnerShuffle<float, 4, int4>(src, dst); break;
        case 5: CUDA::convolveInnerShuffle<float, 5, int>(src, dst); break;
        case 6: CUDA::convolveInnerShuffle<float, 6, int2>(src, dst); break;
        case 7: CUDA::convolveInnerShuffle<float, 7, int>(src, dst); break;
        case 8: CUDA::convolveInnerShuffle<float, 8, int4>(src, dst); break;
        case 9: CUDA::convolveInnerShuffle<float, 9, int>(src, dst); break;
        case 10: CUDA::convolveInnerShuffle<float, 10, int2>(src, dst); break;
        case 11: CUDA::convolveInnerShuffle<float, 11, int>(src, dst); break;
        case 12: CUDA::convolveInnerShuffle<float, 12, int2>(src, dst); break;
        case 13: CUDA::convolveInnerShuffle<float, 13, int>(src, dst); break;
        case 14: CUDA::convolveInnerShuffle<float, 14, int2>(src, dst); break;
        case 15: CUDA::convolveInnerShuffle<float, 15, int>(src, dst); break;
        case 16: CUDA::convolveInnerShuffle<float, 16, int4>(src, dst); break;
        // case 17: CUDA::convolveInnerShuffle<float,17,int>(src,dst); break;
        // case 18: CUDA::convolveInnerShuffle<float,18,int2>(src,dst); break;
        // case 19: CUDA::convolveInnerShuffle<float,19,int>(src,dst); break;
        case 20: CUDA::convolveInnerShuffle<float, 20, int4>(src, dst); break;
        // case 21: CUDA::convolveInnerShuffle<float,21,int>(src,dst); break;
        // case 22: CUDA::convolveInnerShuffle<float,22,int2>(src,dst); break;
        // case 23: CUDA::convolveInnerShuffle<float,23,int>(src,dst); break;
        case 24: CUDA::convolveInnerShuffle<float, 24, int4>(src, dst); break;
    }
}

}  // namespace CUDA
}  // namespace Saiga
the_stack
// 3D cubic UB-spline evaluation benchmark: for each walker, evaluate the
// value, gradient and Hessian of NSIZE splines on the GPU (HIP runtime).
// Fixes vs. original: added the missing #includes, corrected the grid size
// (the old code launched (n+255)/256*256 BLOCKS — 256x more than needed;
// only the in-kernel guard kept it correct), and freed all device memory.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>
#include <hip/hip_runtime.h>

#define restrict __restrict
#define max(a,b) ((a<b)?b:a)
#define min(a,b) ((a<b)?a:b)

const int WSIZE = 12000;           // Walker count
const int NSIZE = 2003;            // Values per walker
const int MSIZE = NSIZE*3+3;       // Gradient vectors (3 components each)
const int OSIZE = NSIZE*9+9;       // Hessian matrices (9 components each)
const int NSIZE_round = NSIZE%16 ? NSIZE+16-NSIZE%16 : NSIZE;  // pad to 16
const size_t SSIZE = (size_t)NSIZE_round*48*48*48;             // Coefs size

// Evaluate value (vals), gradient (grads) and Hessian (hess) of one spline.
__device__ static inline void eval_UBspline_3d_s_vgh (
    const float * restrict coefs_init,
    const intptr_t xs,
    const intptr_t ys,
    const intptr_t zs,
    float * restrict vals,
    float * restrict grads,
    float * restrict hess,
    const float * a, const float * b, const float * c,
    const float * da, const float * db, const float * dc,
    const float * d2a, const float * d2b, const float * d2c,
    const float dxInv, const float dyInv, const float dzInv);

// Evaluate the four cubic basis polynomials (coefficients Af) at tx.
void eval_abc(const float * restrict Af, float tx, float * restrict a);

// One thread per spline index n; walker i selects the output slice.
__global__ void bspline (
    const float *restrict spline_coefs,
    const intptr_t xs, const intptr_t ys, const intptr_t zs,
    float *restrict walkers_vals,
    float *restrict walkers_grads,
    float *restrict walkers_hess,
    const float* a, const float* b, const float* c,
    const float* da, const float* db, const float* dc,
    const float* d2a, const float* d2b, const float* d2c,
    const float spline_x_grid_delta_inv,
    const float spline_y_grid_delta_inv,
    const float spline_z_grid_delta_inv,
    const int spline_num_splines,
    const int i, const int ix, const int iy, const int iz )
{
    int n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n < spline_num_splines)
        eval_UBspline_3d_s_vgh (
            spline_coefs+ix*xs+iy*ys+iz*zs+n, xs, ys, zs,
            walkers_vals+i*NSIZE+n,
            walkers_grads+i*MSIZE+n*3,
            walkers_hess+i*OSIZE+n*9,
            a, b, c, da, db, dc, d2a, d2b, d2c,
            spline_x_grid_delta_inv,
            spline_y_grid_delta_inv,
            spline_z_grid_delta_inv );
}

int main(int argc, char ** argv){
    // Cubic B-spline basis coefficient tables (value, 1st and 2nd derivative).
    float *Af   = (float*) malloc (sizeof(float)*16);
    float *dAf  = (float*) malloc (sizeof(float)*16);
    float *d2Af = (float*) malloc (sizeof(float)*16);
    const float Af_init[16] = {
        -0.166667f,  0.500000f, -0.500000f, 0.166667f,
         0.500000f, -1.000000f,  0.000000f, 0.666667f,
        -0.500000f,  0.500000f,  0.500000f, 0.166667f,
         0.166667f,  0.000000f,  0.000000f, 0.000000f };
    const float dAf_init[16] = {
        0.000000f, -0.500000f,  1.000000f, -0.500000f,
        0.000000f,  1.500000f, -2.000000f,  0.000000f,
        0.000000f, -1.500000f,  1.000000f,  0.500000f,
        0.000000f,  0.500000f,  0.000000f,  0.000000f };
    const float d2Af_init[16] = {
        0.000000f, 0.000000f, -1.000000f,  1.000000f,
        0.000000f, 0.000000f,  3.000000f, -2.000000f,
        0.000000f, 0.000000f, -3.000000f,  1.000000f,
        0.000000f, 0.000000f,  1.000000f,  0.000000f };
    for (int i = 0; i < 16; i++) {
        Af[i]   = Af_init[i];
        dAf[i]  = dAf_init[i];
        d2Af[i] = d2Af_init[i];
    }

    // Base evaluation point; each walker is offset slightly from it.
    float x=0.822387f;
    float y=0.989919f;
    float z=0.104573f;

    float* walkers_vals  = (float*) malloc(sizeof(float)*WSIZE*NSIZE);
    float* walkers_grads = (float*) malloc(sizeof(float)*WSIZE*MSIZE);
    float* walkers_hess  = (float*) malloc(sizeof(float)*WSIZE*OSIZE);
    float* walkers_x     = (float*) malloc(sizeof(float)*WSIZE);
    float* walkers_y     = (float*) malloc(sizeof(float)*WSIZE);
    float* walkers_z     = (float*) malloc(sizeof(float)*WSIZE);

    for (int i=0; i<WSIZE; i++) {
        walkers_x[i] = x + i*1.0/WSIZE;
        walkers_y[i] = y + i*1.0/WSIZE;
        walkers_z[i] = z + i*1.0/WSIZE;
    }

    // Deterministic pseudo-random coefficients.
    float* spline_coefs = (float*) malloc (sizeof(float)*SSIZE);
    for(size_t i=0;i<SSIZE;i++)
        spline_coefs[i]=sqrt(0.22+i*1.0)*sin(i*1.0);

    int spline_num_splines = NSIZE;
    int spline_x_grid_start = 0;
    int spline_y_grid_start = 0;
    int spline_z_grid_start = 0;
    int spline_x_grid_num = 45;
    int spline_y_grid_num = 45;
    int spline_z_grid_num = 45;
    int spline_x_stride=NSIZE_round*48*48;
    int spline_y_stride=NSIZE_round*48;
    int spline_z_stride=NSIZE_round;
    int spline_x_grid_delta_inv=45;
    int spline_y_grid_delta_inv=45;
    int spline_z_grid_delta_inv=45;

    // Device buffers. Note: the copies below use pageable host memory on the
    // null stream, so they are effectively synchronous.
    float* d_walkers_vals;
    hipMalloc((void**)&d_walkers_vals, sizeof(float)*WSIZE*NSIZE);
    hipMemcpyAsync(d_walkers_vals, walkers_vals, sizeof(float)*WSIZE*NSIZE, hipMemcpyHostToDevice, 0);
    float* d_walkers_grads;
    hipMalloc((void**)&d_walkers_grads, sizeof(float)*WSIZE*MSIZE);
    hipMemcpyAsync(d_walkers_grads, walkers_grads, sizeof(float)*WSIZE*MSIZE, hipMemcpyHostToDevice, 0);
    float* d_walkers_hess;
    hipMalloc((void**)&d_walkers_hess, sizeof(float)*WSIZE*OSIZE);
    hipMemcpyAsync(d_walkers_hess, walkers_hess, sizeof(float)*WSIZE*OSIZE, hipMemcpyHostToDevice, 0);
    float* d_spline_coefs;
    hipMalloc((void**)&d_spline_coefs, sizeof(float)*SSIZE);
    hipMemcpyAsync(d_spline_coefs, spline_coefs, sizeof(float)*SSIZE, hipMemcpyHostToDevice, 0);
    float* d_a;   hipMalloc((void**)&d_a,   sizeof(float)*4);
    float* d_b;   hipMalloc((void**)&d_b,   sizeof(float)*4);
    float* d_c;   hipMalloc((void**)&d_c,   sizeof(float)*4);
    float* d_da;  hipMalloc((void**)&d_da,  sizeof(float)*4);
    float* d_db;  hipMalloc((void**)&d_db,  sizeof(float)*4);
    float* d_dc;  hipMalloc((void**)&d_dc,  sizeof(float)*4);
    float* d_d2a; hipMalloc((void**)&d_d2a, sizeof(float)*4);
    float* d_d2b; hipMalloc((void**)&d_d2b, sizeof(float)*4);
    float* d_d2c; hipMalloc((void**)&d_d2c, sizeof(float)*4);

    for(int i=0; i<WSIZE; i++){
        float x = walkers_x[i], y = walkers_y[i], z = walkers_z[i];
        // map the position onto the spline grid
        float ux = x*spline_x_grid_delta_inv;
        float uy = y*spline_y_grid_delta_inv;
        float uz = z*spline_z_grid_delta_inv;
        float ipartx, iparty, ipartz, tx, ty, tz;
        float a[4], b[4], c[4], da[4], db[4], dc[4], d2a[4], d2b[4], d2c[4];
        intptr_t xs = spline_x_stride;
        intptr_t ys = spline_y_stride;
        intptr_t zs = spline_z_stride;

        x -= spline_x_grid_start;
        y -= spline_y_grid_start;
        z -= spline_z_grid_start;

        // split into clamped cell index (ix/iy/iz) and fraction (tx/ty/tz)
        ipartx = (int) ux; tx = ux-ipartx; int ix = min(max(0,(int) ipartx),spline_x_grid_num-1);
        iparty = (int) uy; ty = uy-iparty; int iy = min(max(0,(int) iparty),spline_y_grid_num-1);
        ipartz = (int) uz; tz = uz-ipartz; int iz = min(max(0,(int) ipartz),spline_z_grid_num-1);

        // evaluate the 1D basis functions and upload them
        eval_abc(Af,tx,&a[0]);    hipMemcpyAsync(d_a,   a,   sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(Af,ty,&b[0]);    hipMemcpyAsync(d_b,   b,   sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(Af,tz,&c[0]);    hipMemcpyAsync(d_c,   c,   sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(dAf,tx,&da[0]);  hipMemcpyAsync(d_da,  da,  sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(dAf,ty,&db[0]);  hipMemcpyAsync(d_db,  db,  sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(dAf,tz,&dc[0]);  hipMemcpyAsync(d_dc,  dc,  sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(d2Af,tx,&d2a[0]); hipMemcpyAsync(d_d2a, d2a, sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(d2Af,ty,&d2b[0]); hipMemcpyAsync(d_d2b, d2b, sizeof(float)*4, hipMemcpyHostToDevice, 0);
        eval_abc(d2Af,tz,&d2c[0]); hipMemcpyAsync(d_d2c, d2c, sizeof(float)*4, hipMemcpyHostToDevice, 0);

        // FIX: grid dimension is the BLOCK count (ceil(n/256)), not the
        // rounded-up thread count; the original launched 256x too many blocks.
        dim3 global_size((spline_num_splines+255)/256);
        dim3 local_size(256);

        hipLaunchKernelGGL(bspline, dim3(global_size), dim3(local_size), 0, 0,
            d_spline_coefs, xs, ys, zs,
            d_walkers_vals, d_walkers_grads, d_walkers_hess,
            d_a, d_b, d_c, d_da, d_db, d_dc, d_d2a, d_d2b, d_d2c,
            spline_x_grid_delta_inv,
            spline_y_grid_delta_inv,
            spline_z_grid_delta_inv,
            spline_num_splines, i, ix, iy, iz );
        hipDeviceSynchronize();
    }

    hipMemcpyAsync(walkers_vals,  d_walkers_vals,  sizeof(float)*WSIZE*NSIZE, hipMemcpyDeviceToHost, 0);
    hipMemcpyAsync(walkers_grads, d_walkers_grads, sizeof(float)*WSIZE*MSIZE, hipMemcpyDeviceToHost, 0);
    hipMemcpyAsync(walkers_hess,  d_walkers_hess,  sizeof(float)*WSIZE*OSIZE, hipMemcpyDeviceToHost, 0);
    hipDeviceSynchronize();

    // collect results for the first walker
    float resVal = 0.0;
    float resGrad = 0.0;
    float resHess = 0.0;
    for( int i = 0; i < NSIZE; i++ ) resVal  = resVal  + walkers_vals[i];
    for( int i = 0; i < MSIZE; i++ ) resGrad = resGrad + walkers_grads[i];
    for( int i = 0; i < OSIZE; i++ ) resHess = resHess + walkers_hess[i];
    printf("walkers[0]->collect([resVal resGrad resHess]) = [%e %e %e]\n",
           resVal,resGrad, resHess);

    // FIX: release device memory (the original leaked all device buffers)
    hipFree(d_walkers_vals);
    hipFree(d_walkers_grads);
    hipFree(d_walkers_hess);
    hipFree(d_spline_coefs);
    hipFree(d_a);  hipFree(d_b);  hipFree(d_c);
    hipFree(d_da); hipFree(d_db); hipFree(d_dc);
    hipFree(d_d2a); hipFree(d_d2b); hipFree(d_d2c);

    free(Af);
    free(dAf);
    free(d2Af);
    free(walkers_vals);
    free(walkers_grads);
    free(walkers_hess);
    free(walkers_x);
    free(walkers_y);
    free(walkers_z);
    free(spline_coefs);
    return 0;
}

// Horner evaluation of the four cubic basis polynomials stored row-wise in Af.
void eval_abc(const float * restrict Af, float tx, float * restrict a){
    a[0] = ( ( Af[0]  * tx + Af[1]  ) * tx + Af[2]  ) * tx + Af[3];
    a[1] = ( ( Af[4]  * tx + Af[5]  ) * tx + Af[6]  ) * tx + Af[7];
    a[2] = ( ( Af[8]  * tx + Af[9]  ) * tx + Af[10] ) * tx + Af[11];
    a[3] = ( ( Af[12] * tx + Af[13] ) * tx + Af[14] ) * tx + Af[15];
}

// Tensor-product evaluation of one 3D cubic UB-spline: accumulates the
// value (v0) and the distinct second-derivative terms (h[0..8]) over the
// 4x4x4 coefficient neighborhood, then scales by the grid deltas.
__device__ static inline void eval_UBspline_3d_s_vgh (
    const float * restrict coefs_init,
    const intptr_t xs, const intptr_t ys, const intptr_t zs,
    float * restrict vals, float * restrict grads, float * restrict hess,
    const float * a, const float * b, const float * c,
    const float * da, const float * db, const float * dc,
    const float * d2a, const float * d2b, const float * d2c,
    const float dxInv, const float dyInv, const float dzInv)
{
    float h[9];
    float v0 = 0.0f;
    for (int i = 0; i < 9; ++i) h[i] = 0.0f;

    for (int i=0; i<4; i++)
        for (int j=0; j<4; j++){
            // products of the x and y basis functions (and derivatives)
            float pre20 = d2a[i]*  b[j];
            float pre10 =  da[i]*  b[j];
            float pre00 =   a[i]*  b[j];
            float pre11 =  da[i]* db[j];
            float pre01 =   a[i]* db[j];
            float pre02 =   a[i]*d2b[j];
            const float * restrict coefs = coefs_init + i*xs + j*ys;
            // contract the z direction with c, dc, d2c
            float sum0 =   c[0] * coefs[0] +   c[1] * coefs[zs] +   c[2] * coefs[zs*2] +   c[3] * coefs[zs*3];
            float sum1 =  dc[0] * coefs[0] +  dc[1] * coefs[zs] +  dc[2] * coefs[zs*2] +  dc[3] * coefs[zs*3];
            float sum2 = d2c[0] * coefs[0] + d2c[1] * coefs[zs] + d2c[2] * coefs[zs*2] + d2c[3] * coefs[zs*3];
            h[0] += pre20 * sum0;  // d2/dx2
            h[1] += pre11 * sum0;  // d2/dxdy
            h[2] += pre10 * sum1;  // d2/dxdz
            h[4] += pre02 * sum0;  // d2/dy2
            h[5] += pre01 * sum1;  // d2/dydz
            h[8] += pre00 * sum2;  // d2/dz2
            h[3] += pre10 * sum0;  // d/dx
            h[6] += pre01 * sum0;  // d/dy
            h[7] += pre00 * sum1;  // d/dz
            v0   += pre00 * sum0;  // value
        }
    vals[0] = v0;

    grads[0] = h[3] * dxInv;
    grads[1] = h[6] * dyInv;
    grads[2] = h[7] * dzInv;

    hess [0] = h[0]*dxInv*dxInv;
    hess [1] = h[1]*dxInv*dyInv;
    hess [2] = h[2]*dxInv*dzInv;
    hess [3] = h[1]*dxInv*dyInv; // Copy hessian elements into lower half of 3x3 matrix
    hess [4] = h[4]*dyInv*dyInv;
    hess [5] = h[5]*dyInv*dzInv;
    hess [6] = h[2]*dxInv*dzInv; // Copy hessian elements into lower half of 3x3 matrix
    hess [7] = h[5]*dyInv*dzInv; // Copy hessian elements into lower half of 3x3 matrix
    hess [8] = h[8]*dzInv*dzInv;
}
the_stack
// configuration
//#include <portinfo>
// STL
#include <complex>
#include <stdexcept>   // std::runtime_error (added: previously pulled in transitively)
#include <string>
// pyre
#include <pyre/journal.h>
// cuda
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// pull the declarations
#include "kernels.h"

// the correlation kernel
template <std::size_t T, typename value_t = float>
__global__
void
_correlate(const value_t * arena, const value_t * refStats, const value_t * tgtStats,
           std::size_t rdim, std::size_t rcells,
           std::size_t tdim, std::size_t tcells,
           std::size_t cdim, std::size_t ccells,
           std::size_t row, std::size_t col,
           value_t * correlation);

// implementation
//
// Computes, for every tile pair and every placement (row, col) of the sliding
// search window, the normalized cross-correlation between the reference tile
// and the corresponding target sub-window.
void
ampcor::cuda::kernels::
correlate(const float * dArena, const float * refStats, const float * tgtStats,
          std::size_t pairs,
          std::size_t refCells, std::size_t tgtCells, std::size_t corCells,
          std::size_t refDim, std::size_t tgtDim, std::size_t corDim,
          float * dCorrelation)
{
    // make a channel
    pyre::journal::debug_t channel("ampcor.cuda");

    // figure out the job layout and launch the calculation on the device
    // each thread block takes care of one tile pair, so we need as many blocks as there are pairs
    auto B = pairs;
    // the nominal number of threads per block is the reference tile dimension
    auto T = refDim;
    // BUG FIX: the kernels below are launched with the thread count rounded UP
    // to the next tier (32/64/128/256/512), and EVERY launched thread writes
    // its two slots in shared memory before the reduction. The shared memory
    // request must therefore be sized by the rounded-up launch count, not by
    // refDim: sizing by refDim under-allocated and let threads t >= refDim
    // write past the end of {scratch}. This also guarantees the >= 64 floats
    // required by the warp-level reduction stage.
    std::size_t launchT = 32;
    while (launchT < T) launchT *= 2;
    // two {value_t}'s worth of shared memory per launched thread
    auto S = 2 * launchT * sizeof(float);

    // show me
    channel
        << pyre::journal::at(__HERE__)
        << "launching " << B << " blocks of " << T << " threads each, with "
        << S << " bytes of shared memory per block, for each of the " << corCells
        << " possible placements of the search window within the target tile;"
        << " a grand total of " << (B*corCells) << " kernel launches"
        << pyre::journal::endl;

    // for storing error codes
    cudaError_t status = cudaSuccess;
    // go through all possible row offsets for the sliding window
    // (FIX: use std::size_t to avoid a signed/unsigned comparison with corDim)
    for (std::size_t row = 0; row < corDim; ++row) {
        // and all possible column offsets
        for (std::size_t col = 0; col < corDim; ++col) {
            // deduce the correct kernel to launch and deploy
            // N.B.: kernel launch is an implicit barrier, so no need for any extra
            // synchronization
            if (refDim <= 32) {
                // tell me
                channel << "deploying the 32x32 kernel";
                // do it
                _correlate<32> <<<B,32,S>>> (dArena, refStats, tgtStats,
                                             refDim, refCells, tgtDim, tgtCells, corDim, corCells,
                                             row, col, dCorrelation);
            } else if (refDim <= 64) {
                // tell me
                channel << "deploying the 64x64 kernel";
                // do it
                _correlate<64> <<<B,64,S>>> (dArena, refStats, tgtStats,
                                             refDim, refCells, tgtDim, tgtCells, corDim, corCells,
                                             row, col, dCorrelation);
            } else if (refDim <= 128) {
                // tell me
                channel << "deploying the 128x128 kernel";
                // do it
                _correlate<128> <<<B,128,S>>> (dArena, refStats, tgtStats,
                                               refDim, refCells, tgtDim, tgtCells, corDim, corCells,
                                               row, col, dCorrelation);
            } else if (refDim <= 256) {
                // tell me
                channel << "deploying the 256x256 kernel";
                // do it
                _correlate<256> <<<B,256,S>>> (dArena, refStats, tgtStats,
                                               refDim, refCells, tgtDim, tgtCells, corDim, corCells,
                                               row, col, dCorrelation);
            } else if (refDim <= 512) {
                // tell me
                channel << "deploying the 512x512 kernel";
                // do it
                _correlate<512> <<<B,512,S>>> (dArena, refStats, tgtStats,
                                               refDim, refCells, tgtDim, tgtCells, corDim, corCells,
                                               row, col, dCorrelation);
            } else {
                // complain
                throw std::runtime_error("cannot handle reference tiles of this shape");
            }
            // check for errors
            status = cudaPeekAtLastError();
            // if something went wrong
            if (status != cudaSuccess) {
                // make a channel
                pyre::journal::error_t error("ampcor.cuda");
                // complain
                error
                    << pyre::journal::at(__HERE__)
                    << "after launching the " << row << "x" << col << " correlators: "
                    << cudaGetErrorName(status) << " (" << status << ")"
                    << pyre::journal::endl;
                // and bail
                break;
            }
        }
        // if something went wrong in the inner loop
        if (status != cudaSuccess) {
            // bail out of the outer loop as well
            break;
        }
    }
    // wait for the device to finish
    status = cudaDeviceSynchronize();
    // check
    if (status != cudaSuccess) {
        // get the error description
        std::string description = cudaGetErrorName(status);
        // make a channel
        pyre::journal::error_t error("ampcor.cuda");
        // complain
        error
            << pyre::journal::at(__HERE__)
            << "while waiting for a kernel to finish: " << description
            << " (" << status << ")"
            << pyre::journal::endl;
        // and bail
        throw std::runtime_error(description);
    }
    // all done
    return;
}

// the correlation kernel
//
// One block per tile pair; thread t sums the contribution of column t of the
// reference tile and of the (row, col)-shifted target sub-window, then the
// block reduces the partial sums in shared memory (tree reduction down to a
// warp, then warp shuffles). Requires 2 * blockDim.x floats of dynamic
// shared memory.
template <std::size_t T, typename value_t>
__global__
void
_correlate(const value_t * arena, // the dataspace
           const value_t * refStats, // the hyper-grid of reference tile variances
           const value_t * tgtStats, // the hyper-grid of target tile averages
           std::size_t rdim, std::size_t rcells, // ref grid shape and size
           std::size_t tdim, std::size_t tcells, // tgt grid shape and size
           std::size_t cdim, std::size_t ccells, // cor grid shape and size
           std::size_t row, std::size_t col,
           value_t * correlation)
{
    // build the workload descriptors
    // global
    // std::size_t B = gridDim.x;  // number of blocks
    // std::size_t T = blockDim.x; // number of threads per block
    // std::size_t W = B*T;        // total number of workers
    // local
    std::size_t b = blockIdx.x;  // my block id
    std::size_t t = threadIdx.x; // my thread id within my block
    // std::size_t w = b*T + t;  // my worker id

    // N.B.: do not be tempted to terminate early threads that have no assigned workload; their
    // participation is required to make sure that shared memory is properly zeroed out for the
    // nominally out of bounds accesses

    // get access to my shared memory
    extern __shared__ value_t scratch[];
    // get a handle to this thread block group
    cooperative_groups::thread_block cta = cooperative_groups::this_thread_block();

    // initialize the numerator term
    value_t numerator = 0;
    // initialize the target variance accumulator
    value_t tgtVariance = 0;

    // look up the mean target amplitude
    auto mean = tgtStats[b*ccells + row*cdim + col];
    // reference and target grids are interleaved; compute the stride
    std::size_t stride = rcells + tcells;
    // my {ref} starting point is column {t} of grid {b}
    auto ref = arena + b*stride + t;
    // my {tgt} starting point is column {t} of grid {b} at (row, col)
    auto tgt = ref + rcells + (row*tdim + col);

    // if my thread id is less than the number of columns in the reference tile, i need to sum
    // up the contributions to the numerator and the target tile variance from my column; if
    // not, my contribution is zero, which is what my shared memory slots hold
    if (t < rdim) {
        // run down the two columns
        for (std::size_t idx=0; idx < rdim; ++idx) {
            // fetch the ref value
            value_t r = ref[idx*rdim];
            // fetch the tgt value and subtract the mean target amplitude
            // (FIX: renamed from 't', which shadowed the thread id above)
            value_t dt = tgt[idx*tdim] - mean;
            // update the numerator
            numerator += r * dt;
            // and the target variance
            tgtVariance += dt * dt;
        }
    }
    // save my partial results
    scratch[2*t] = numerator;
    scratch[2*t + 1] = tgtVariance;
    // barrier: make sure everybody is done
    cta.sync();

    // now do the reduction in shared memory
    // for progressively smaller block sizes, the bottom half of the threads collect partial sums
    // N.B.: T is a template parameter, known at compile time, so it's easy for the optimizer to
    // eliminate the impossible clauses

    // for 512 threads per block
    if (T >= 512 && t < 256) {
        // my sibling's offset
        auto offset = 2*(t+256);
        // update my partial sum by reading my sibling's value
        numerator += scratch[offset];
        // ditto for the target variance
        tgtVariance += scratch[offset+1];
        // and make them available
        scratch[2*t] = numerator;
        scratch[2*t+1] = tgtVariance;
    }
    // make sure everybody is done
    cta.sync();

    // for 256 threads per block
    if (T >= 256 && t < 128) {
        // my sibling's offset
        auto offset = 2*(t+128);
        // update my partial sum by reading my sibling's value
        numerator += scratch[offset];
        // ditto for the target variance
        tgtVariance += scratch[offset+1];
        // and make them available
        scratch[2*t] = numerator;
        scratch[2*t+1] = tgtVariance;
    }
    // make sure everybody is done
    cta.sync();

    // for 128 threads per block
    if (T >= 128 && t < 64) {
        // my sibling's offset
        auto offset = 2*(t+64);
        // update my partial sum by reading my sibling's value
        numerator += scratch[offset];
        // ditto for the target variance
        tgtVariance += scratch[offset+1];
        // and make them available
        scratch[2*t] = numerator;
        scratch[2*t+1] = tgtVariance;
    }
    // make sure everybody is done
    cta.sync();

    // on recent architectures, there is a faster way to do the reduction once we reach the
    // warp level; the only cost is that we have to make sure there is enough memory for 64
    // threads, i.e. the shared memory size is bound from below by 64*sizeof(value_t)
    if (t < 32) {
        // if we need to
        if (T >= 64) {
            // my sibling's offset
            auto offset = 2*(t+32);
            // pull a neighbor's value
            numerator += scratch[offset];
            tgtVariance += scratch[offset+1];
        }
        // get a handle to the active thread group
        cooperative_groups::coalesced_group active = cooperative_groups::coalesced_threads();
        // the power-of-2 threads
        for (int offset = 16; offset > 0; offset >>= 1) {
            // reduce using {shuffle}
            numerator += active.shfl_down(numerator, offset);
            tgtVariance += active.shfl_down(tgtVariance, offset);
        }
    }

    // finally, the master thread of each block
    if (t == 0) {
        // looks up the sqrt of the reference tile variance
        value_t refVariance = refStats[b];
        // computes the correlation
        auto corr = numerator / refVariance / std::sqrt(tgtVariance);
        // computes the slot where this result goes
        std::size_t slot = b*ccells + row*cdim + col;
        // and writes the sum to the result vector
        correlation[slot] = corr;
    }

    // all done
    return;
}

// end of file
the_stack
extern "C" {

// Reinterpret pixel {pixelId} of a float image as its raw 32-bit pattern.
__device__ __inline__ unsigned int AsUint(float *sourceImage, int pixelId)
{
    return *(((unsigned int*)sourceImage) + pixelId);
}

// Extract byte {comp} (0 = lowest) from a packed 32-bit pixel.
__device__ __inline__ unsigned int GetComponent(unsigned int pixel, int comp)
{
    return (pixel >> (comp * 8)) & 0xFF;
}

/*
Draws a background color into a 3-component image.

inputWidth & inputHeight: map dimensions in pixels
gridDim.y = 3, one for each color component
*/
__global__ void DrawRgbBackgroundKernel(float *target, int inputWidth, int inputHeight, float r, float g, float b)
{
    int column = threadIdx.x + blockDim.x * blockIdx.z;

    if (column >= inputWidth)
        return;

    int id = inputWidth * ( blockIdx.y * gridDim.x + blockIdx.x) // blockIdx.x == row, blockIdx.y == color channel
        + column;

    int imagePixels = inputWidth * inputHeight;

    if (id < 3*imagePixels) // 3 for RGB
    {
        float color = 0.0f;

        switch (blockIdx.y)
        {
        case 0:
            color = r;
            break;
        case 1:
            color = g;
            break;
        case 2:
            color = b;
            break;
        }

        target[id] = color;
    }
}

/*
Adds noise into a 3-component image.

inputWidth & inputHeight: map dimensions in pixels

NOTE(review): target pixels are treated as packed 0xAARRGGBB integers stored
in float bits (see the reinterpret casts below), not as normalized floats —
confirm against the producer of {target}.
*/
__global__ void AddRgbNoiseKernel(float *target, int inputWidth, int inputHeight, float *randoms, int isBlackAndWhiteNoise)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    int imagePixels = inputWidth * inputHeight;

    if (id < imagePixels)
    {
        // raw packed pixel bits
        unsigned int tg = *((unsigned int*)(&target[id]));
        // NOTE(review): implicit float -> int truncation of the noise value;
        // presumably intentional (randoms appear to hold integer offsets)
        int random = randoms[id];

        // per-channel: add noise and clamp to [0, 255]
        int blue = (tg >> 0) & (0xFF);
        blue += random;
        blue = blue < 255 ? blue : 255;
        blue = blue > 0 ? blue : 0;

        int green = ((tg >> 8) & (0xFF));
        // colored noise uses an independent random plane per channel
        green += (isBlackAndWhiteNoise ? random : (int)(randoms[id + imagePixels]));
        green = green < 255 ? green : 255;
        green = green > 0 ? green : 0;

        int red = ((tg >> 16) & (0xFF));
        red += (isBlackAndWhiteNoise ? random : (int)(randoms[id + imagePixels * 2]));
        red = red < 255 ? red : 255;
        red = red > 0 ? red : 0;

        // alpha is the last channel (<< 24)
        // NOTE(review): alpha is not preserved here — the repack below drops
        // the original pixel's high byte
        unsigned int tmp =
            (*((unsigned int *)(&blue)) << 0) |
            (*((unsigned int *)(&green)) << 8) |
            (*((unsigned int *)(&red)) << 16);

        target[id] = *((float *)(&tmp));
    }
}

/*
Fill specified rectangle with color
*/
__global__ void DrawRgbaColorKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, int areaWidth, int areaHeight, float r, float g, float b)
{
    int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;

    int targetPixels = targetWidth * targetHeight;
    int texturePixels = areaWidth * areaHeight;

    // decompose the flat id into (channel, y, x) within the area
    int idTextureRgb = id / texturePixels;
    int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster
    int idTextureY = idTexturePixel / areaWidth;
    int idTextureX = (idTexturePixel - idTextureY * areaWidth); // same as (id % textureWidth), but the kernel runs another 10% faster

    if (idTextureRgb < 3) // 3 channels that we will write to
    {
        // if the texture pixel offset by inputX, inputY, lies inside the target
        if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0
            && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0)
        {
            float color = 0.0f;

            switch (idTextureRgb)
            {
            case 0:
                color = r;
                break;
            case 1:
                color = g;
                break;
            case 2:
                color = b;
                break;
            }

            // planar layout: channel plane, then row, then column
            int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX);
            target[tIndex] = color;
        }
    }
}

/*
Draws a texture into a 3-component target. RGBA. Checks bounds.
*/ __global__ void DrawRgbaTextureKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int idTextureRgb = id / texturePixels; int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster int idTextureY = idTexturePixel / textureWidth; int idTextureX = (idTexturePixel - idTextureY * textureWidth); // same as (id % textureWidth), but the kernel runs another 10% faster if (idTextureRgb < 3) // 3 channels that we will write to { // the texture is in BGR format, we want RGB switch (idTextureRgb) { case 0: // R idTextureRgb = 2; // B break; case 2: // B idTextureRgb = 0; // R break; } // if the texture pixel offset by inputX, inputY, lies inside the target if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0 && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0) { int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX); int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture float a = texture[aIndex]; target[tIndex] = target[tIndex] * (1.0f - a) + a * texture[id]; } } } /* Draws a texture into a 3-component target. RGBA. Checks bounds. Stretches the texture. 
*/ __global__ void DrawRgbaTextureKernelNearestNeighbor(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight, int objectWidth, int objectHeight) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int objectPixels = objectWidth * objectHeight; int idObjectRgb = id / objectPixels; int idObjectPixel = (id - idObjectRgb * objectPixels); // same as (id % objectPixels), but the kernel runs 10% faster int idObjectY = idObjectPixel / objectWidth; int idObjectX = (idObjectPixel - idObjectY * objectWidth); // same as (id % textureWidth), but the kernel runs another 10% faster if (idObjectRgb < 3) // 3 channels that we will write to { int targetRgb = idObjectRgb; // the texture is in BGR format, we want RGB switch (idObjectRgb) { case 0: // R targetRgb = 2; // B break; case 2: // B targetRgb = 0; // R break; } // if the object pixel offset by inputX, inputY, lies inside the target if (idObjectX + inputX < targetWidth && idObjectX + inputX >= 0 && idObjectY + inputY < targetHeight && idObjectY + inputY >= 0) { // nearest neighbor texture X,Y: int textureX = textureWidth * idObjectX / objectWidth; int textureY = textureHeight * idObjectY / objectHeight; int textureId = textureY * textureWidth + textureX; int rgbIndex = textureId + idObjectRgb * texturePixels; float textureValue = texture[rgbIndex]; int tIndex = targetPixels * targetRgb + targetWidth * (idObjectY + inputY) + (idObjectX + inputX); int aIndex = textureId + 3 * texturePixels; // the A component of the texture float a = texture[aIndex]; target[tIndex] = target[tIndex] * (1.0f - a) + a * textureValue; } } } /* Same as DrawRgbaTextureKernelNearestNeighbor, but texture = mask and texture's pixel values are replaced by a single color */ __global__ void DrawMaskedColorKernelNearestNeighbor(float *target, int 
targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight, int objectWidth, int objectHeight, float r, float g, float b ) // texture = mask { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int objectPixels = objectWidth * objectHeight; int idObjectRgb = id / objectPixels; int idObjectPixel = (id - idObjectRgb * objectPixels); // same as (id % objectPixels), but the kernel runs 10% faster int idObjectY = idObjectPixel / objectWidth; int idObjectX = (idObjectPixel - idObjectY * objectWidth); // same as (id % textureWidth), but the kernel runs another 10% faster if (idObjectRgb < 3) // 3 channels that we will write to { int targetRgb = idObjectRgb; // the texture is in BGR format, we want RGB switch (idObjectRgb) { case 0: // R targetRgb = 2; // B break; case 2: // B targetRgb = 0; // R break; } // if the object pixel offset by inputX, inputY, lies inside the target if (idObjectX + inputX < targetWidth && idObjectX + inputX >= 0 && idObjectY + inputY < targetHeight && idObjectY + inputY >= 0) { // nearest neighbor texture X,Y: int textureX = textureWidth * idObjectX / objectWidth; int textureY = textureHeight * idObjectY / objectHeight; int textureId = textureY * textureWidth + textureX; int tIndex = targetPixels * targetRgb + targetWidth * (idObjectY + inputY) + (idObjectX + inputX); int aIndex = textureId + 3 * texturePixels; // the A component of the texture float a = texture[aIndex]; if (a > 0) // mask allows color here { // apply this: target[tIndex] = target[tIndex] * (1.0f - a) + a * color; target[tIndex] = target[tIndex] * (1.0f - a); switch (idObjectRgb) { case 0: target[tIndex] += a*r; break; case 1: target[tIndex] += a*g; break; case 2: default: target[tIndex] += a*b; break; } } } } } /* Optimized version of DrawRgbaTextureKernel : avoids division operations (~30% speedup) 
The width of the texture is in blockDim.x The height of the texture is distributed between blockDim.y and gridDim.x */ __global__ void DrawRgbaTextureKernel2DBlock(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight) { int id = blockDim.x * blockDim.y * (blockIdx.y * gridDim.x + blockIdx.x) + blockDim.x * threadIdx.y + threadIdx.x; // 2D grid of 2D blocks; block dimension x = texture width; // grid dimension x + block dimension y = texture height int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int idTextureRgb = blockIdx.y; int idTexturePixel = (id - idTextureRgb * texturePixels); int idTextureY = blockIdx.x * blockDim.y + threadIdx.y; int idTextureX = threadIdx.x; if (idTextureRgb < 3) // 3 channels that we will write to { // the texture is in BGR format, we want RGB switch (idTextureRgb) { case 0: // R idTextureRgb = 2; // B break; case 2: // B idTextureRgb = 0; // R break; } // if the texture pixel offset by inputX, inputY, lies inside the target if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0 && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0) { int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX); int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture float a = texture[aIndex]; target[tIndex] = target[tIndex] * (1.0f - a) + a * texture[id]; } } } /* Draws an RGB color into the masked area. The color is drawn in each pixel that has non-0 alpha. 
*/ __global__ void DrawMaskedColorKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *textureMask, int textureWidth, int textureHeight, float r, float g, float b) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int idTextureRgb = id / texturePixels; int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster int idTextureY = idTexturePixel / textureWidth; int idTextureX = (idTexturePixel - idTextureY * textureWidth); // same as (id % textureWidth), but the kernel runs another 10% faster if (idTextureRgb < 3) // only RGB channels are interesting { // if the texture pixel offset by inputX, inputY, lies inside the target if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0 && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0) { int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX); int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture float a = textureMask[aIndex]; if (a > 0) // mask allows color here { switch (idTextureRgb) { case 0: target[tIndex] = r; break; case 1: target[tIndex] = g; break; case 2: default: target[tIndex] = b; break; } } } } } /* Optimized version of DrawMaskedColorKernel : avoids division operations (~30% speedup) The width of the texture is in blockDim.x The height of the texture is distributed between blockDim.y and gridDim.x */ __global__ void DrawMaskedColorKernel2DBlock(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *textureMask, int textureWidth, int textureHeight, float r, float g, float b) { int id = blockDim.x * blockDim.y * (blockIdx.y * gridDim.x + blockIdx.x) + blockDim.x * threadIdx.y + threadIdx.x; // 2D grid of 2D blocks; block dimension x = texture width; // grid 
dimension x + block dimension y = texture height int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int idTextureRgb = blockIdx.y; int idTexturePixel = (id - idTextureRgb * texturePixels); int idTextureY = blockIdx.x * blockDim.y + threadIdx.y; int idTextureX = threadIdx.x; if (idTextureRgb < 3) // only RGB channels are interesting { // if the texture pixel offset by inputX, inputY, lies inside the target if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0 && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0) { int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX); int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture float a = textureMask[aIndex]; if (a > 0) // mask allows color here { switch (idTextureRgb) { case 0: target[tIndex] = r; break; case 1: target[tIndex] = g; break; case 2: default: target[tIndex] = b; break; } } } } } /* Convert Raw to RGB */ __global__ void ExtractRawComponentsToRgbKernel(float *target, int inputWidth, int inputHeight) { int pixelId = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; int imagePixels = inputWidth * inputHeight; if (pixelId >= imagePixels) return; unsigned int* uTarget = (unsigned int*)target; unsigned int pixel = uTarget[pixelId]; for (int i = 2; i >= 0; i--) { unsigned int component = pixel; component = component >> (8 * (2-i)); // 2-i == RGB -> BGR component = component & 0xFF; target[imagePixels * i + pixelId] = ((float)component)/255.0f; __syncthreads(); } } /* Convert Raw to RGB */ __global__ void RawToRgbKernel(float *source, float *target, int pixelCount) { int pixelId = blockDim.x*blockIdx.x + threadIdx.x; if (pixelId >= pixelCount) return; unsigned int pixel = AsUint(source, pixelId); for (int i = 0; i < 3; i++) // 3: don't care about alpha { target[pixelCount * i + pixelId] = GetComponent(pixel,2-i) / 255.0f; // /255.0f to re-scale from 0 
to 1, // 2-i to convert between RGB and BGR } } /* Convert Raw to Raw grayscale http://stackoverflow.com/questions/687261/converting-rgb-to-grayscale-intensity */ __global__ void RawToRawGrayscaleKernel(float *source, float *target, int pixelCount) { int pixelId = blockDim.x*blockIdx.x + threadIdx.x; if (pixelId >= pixelCount) return; unsigned int pixel = AsUint(source, pixelId); unsigned int luminance = (unsigned int) (.2126f * GetComponent(pixel, 2) + .7152f * GetComponent(pixel, 1) + .0722f * GetComponent(pixel, 0)); unsigned int alpha = GetComponent(pixel, 3); *((unsigned int*)&target[pixelId]) = luminance | (luminance << 8) | (luminance << 16) | (alpha << 24); } /* Convert Raw to Grayscale http://stackoverflow.com/questions/687261/converting-rgb-to-grayscale-intensity */ __global__ void RawToGrayscaleKernel(float *source, float *target, int pixelCount) { int pixelId = blockDim.x*blockIdx.x + threadIdx.x; if (pixelId >= pixelCount) return; unsigned int pixel = AsUint(source, pixelId); float luminance = (.2126f * GetComponent(pixel, 2) + .7152f * GetComponent(pixel, 1) + .0722f * GetComponent(pixel, 0)); target[pixelId] = luminance / 255.0f; // to re-scale from 0 to 1 } }
the_stack
#include "solve.h"

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <iostream>

#ifdef _WIN32
typedef unsigned int uint32_t;
//typedef unsigned short uint32_t;
#endif

using namespace std;

#define PROFILE 0
#define USE_GRID 1
#define USE_BOX_PRUNING 0

#define kRadius 0.1f
#define kMaxRadius (kRadius)
#define kInvCellEdge (0.5f/kMaxRadius)

// cell key type: a hashed grid cell index when the uniform grid is enabled,
// otherwise a float sort key
#if USE_GRID
typedef uint32_t CellId;
#else
typedef float CellId;
#endif

// Device-side state for a particle ("grain") simulation.
// All pointers are device allocations of mNumGrains elements unless noted.
// Vec3/Vec4/GrainParams/GrainTimers come from solve.h (not visible here).
struct GrainSystem
{
public:
    Vec3* mPositions;
    Vec3* mVelocities;
    float* mRadii;

    // double-buffered, cell-sorted copies swapped with the arrays above each iteration
    Vec3* mSortedPositions;
    Vec3* mSortedVelocities;
    float* mSortedRadii;

    Vec3* mNewVelocities;

    // per-cell [start, end) ranges into the sorted particle arrays (kGridDim^3 entries)
    uint32_t* mCellStarts;
    uint32_t* mCellEnds;
    CellId* mCellIds;   // per-particle hashed cell id
    uint32_t* mIndices; // per-particle original index, permuted by the sort

    uint32_t mNumGrains;
    GrainParams mParams;
};

// RAII scope timer around a CUDA event pair; accumulates elapsed ms into 'timer'.
// Compiled to a no-op stub when PROFILE is 0.
#if PROFILE
struct CudaTimer
{
    CudaTimer(const char* name, cudaEvent_t start, cudaEvent_t stop, float& timer)
        : mTimer(timer), mName(name), mStart(start), mStop(stop)
    {
        cudaEventRecord(mStart, 0);
    }

    ~CudaTimer()
    {
        cudaEventRecord(mStop, 0);
        cudaEventSynchronize(mStop);

        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, mStart, mStop);
        mTimer += elapsedTime;

        //cout << mName << " took: " << elapsedTime << endl;
    }

    float& mTimer;
    cudaEvent_t mStart;
    cudaEvent_t mStop;
    const char* mName;
};
#else
struct CudaTimer
{
    CudaTimer(const char*, cudaEvent_t, cudaEvent_t, float& ) {}
};
#endif

// implemented elsewhere (radix sort over (cellId, particleIndex) pairs)
void SortCellIndices(uint32_t* cellIds, uint32_t* particleIndices, uint32_t numGrains);
void SortCellIndices(float* cellIds, uint32_t* particleIndices, uint32_t numGrains);

__device__ inline float sqr(float x) { return x*x; }

// calculate collision impulse between two bodies with masses ma, mb,
// contact normal n, penetration depth d (negative when overlapping)
__device__ inline Vec3 CollisionImpulse(Vec3 va, Vec3 vb, float ma, float mb, Vec3 n, float d, float baumgarte, float friction, float overlap)
{
    // calculate relative velocity
    Vec3 vd = vb-va;

    // calculate relative normal velocity
    float vn = Dot(vd, n);

    Vec3 j = Vec3(0.0f, 0.0f, 0.0f);

    //if (vn < 0.0f)
    vn = min(vn, 0.0f); // only resolve approaching contacts

    {
        // calculate relative tangential velocity
        Vec3 vt = vd - n*vn;
        float vtsq = Dot(vt, vt);
        // NOTE(review): rsqrtf(0) is +inf when there is no tangential motion;
        // the commented-out "+ 0.001f" epsilon suggests this was a known issue
        float rcpvt = rsqrtf(vtsq);// + 0.001f);

        // position bias (Baumgarte stabilization, only when penetrating beyond 'overlap')
        float bias = baumgarte*min(d+overlap, 0.0f);

        Vec3 jn = -(vn + bias)*n;
        Vec3 jt = max(friction*vn*rcpvt, -1.0f)*vt;

        // crappy static friction
        if (fabsf(vtsq*rcpvt) < fabsf(friction*vn*2.0f) && vn < 0.0f)
            jt = -vt;

        // total mass
        float msum = ma + mb;

        // normal impulse, distributed by mass ratio
        j = (jn + jt)*mb/msum;
    }

    return j;
}

#if USE_GRID

const uint32_t kGridDim = 128;

// transform a world space coordinate into cell coordinate
__device__ inline uint32_t GridCoord(float x, float invCellEdge)
{
    // offset to handle negative numbers
    float l = x+1000.0f;
    uint32_t c = (uint32_t)(floorf(l*invCellEdge));
    return c;
}

// wrap cell coordinates into a kGridDim^3 table (kGridDim must stay a power of two
// for the & masking to be valid)
__device__ inline uint32_t GridHash(int x, int y, int z)
{
    uint32_t cx = x & (kGridDim-1);
    uint32_t cy = y & (kGridDim-1);
    uint32_t cz = z & (kGridDim-1);

    return cy*(kGridDim*kGridDim) + cx*kGridDim + cz;
}

/*
__device__ inline uint32_t GridHash(int x, int y, int z)
{
    const uint32_t p1 = 73856093;
    const uint32_t p2 = 19349663;
    const uint32_t p3 = 53471161;

    uint32_t n = x*p1 ^ y*p2 ^ z*p3;
    return n&(kGridDim*kGridDim*kGridDim-1);
}
*/

// one thread per particle: compute its hashed cell id and seed the identity permutation
// NOTE(review): no bounds guard — the launch configuration must cover exactly the
// particle count (see kNumBlocks in grainUpdateSystem)
__global__ void CreateCellIndices(const Vec3* positions, uint32_t* cellIds, uint32_t* particleIndices)
{
    uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;

    Vec3 p = positions[i];
    cellIds[i] = GridHash(GridCoord(p.x, kInvCellEdge), GridCoord(p.y, kInvCellEdge), GridCoord(p.z, kInvCellEdge));
    particleIndices[i] = i;
}

// scan the sorted particle-cell array to find each cell's [start, end) range
__global__ void CreateGrid(const uint32_t* cellIds, uint32_t* cellStarts, uint32_t* cellEnds, uint32_t numGrains)
{
    uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;

    // scan the particle-cell array to find the start and end
    uint32_t c = cellIds[i];

    if (i == 0)
    {
        cellStarts[c] = i;
    }
    else
    {
        uint32_t p = cellIds[i-1];

        if (c != p)
        {
            cellStarts[c] = i;
            cellEnds[p] = i;
        }
    }

    if (i == numGrains-1)
    {
        cellEnds[c] = i+1;
    }
}

// sphere-sphere collision response; returns the impulse to apply to sphere a
// NOTE(review): 'mtd' compares squared distance against squared radius sum, but the
// penetration passed on is d-rsum with d the (un-squared) distance — possibly
// intentional approximation, verify against the original solver
__device__ inline Vec3 CollideSphere(Vec3 xa, Vec3 xb, Vec3 va, Vec3 vb, float ra, float rb, float baumgarte, float friction, float overlap)
{
    // distance to sphere
    Vec3 t = xa - xb;

    Vec3 j = Vec3(0.0f, 0.0f, 0.0f);

    float d = Dot(t, t);
    float rsum = ra + rb;
    float mtd = d - sqr(rsum);

    if (mtd < 0.0f)
    {
        Vec3 n = Vec3(0.0f, 1.0f, 0.0f);

        if (d > 0.0f)
        {
            float rcpDist = rsqrtf(d);

            n = t * rcpDist;
            d = d * rcpDist;
        }

        j = CollisionImpulse(vb, va, 1.0f, 1.0f, n, d-rsum, baumgarte, friction, overlap);
    }

    return j;
}

// accumulate collision impulses from every particle stored in cell (cx, cy, cz)
__device__ inline Vec3 CollideCell(int index, int cx, int cy, int cz, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
                                   const Vec3* positions, const Vec3* velocities, const float* radii,
                                   Vec3 x, Vec3 v, float r, float baumgarte, float friction, float overlap)
{
    Vec3 j = Vec3(0.0f, 0.0f, 0.0f);

    uint32_t cellIndex = GridHash(cx, cy, cz);

    uint32_t cellStart = cellStarts[cellIndex];
    uint32_t cellEnd = cellEnds[cellIndex];

    for (int i=cellStart; i < cellEnd; ++i)
    {
        // particles are already reordered by cell, so the sorted slot IS the index
        uint32_t particleIndex = i;//indices[i];

        if (particleIndex != index)
        {
            j += CollideSphere(x, positions[particleIndex], v, velocities[particleIndex], r, radii[particleIndex], baumgarte, friction, overlap);
        }
    }

    return j;
}

#endif

// gather particles into cell-sorted order using the sorted index permutation
__global__ void ReorderParticles(const Vec3* positions, const Vec3* velocities, const float* radii, Vec3* sortedPositions, Vec3* sortedVelocities, float* sortedRadii, const uint32_t* indices)
{
    uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;

    int originalIndex = indices[i];

    sortedPositions[i] = positions[originalIndex];
    sortedVelocities[i] = velocities[originalIndex];
    sortedRadii[i] = radii[originalIndex];
}

// one thread per particle: sum impulses from the 3x3x3 cell neighborhood and the
// collision planes, then write the post-collision velocity to newVelocities
__global__ void Collide(const Vec3* positions, const Vec3* velocities, const float* radii,
                        const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
                        Vec3* newVelocities, int numGrains, GrainParams params, float dt, float scale)
{
    const int index = blockIdx.x*blockDim.x + threadIdx.x;

    const Vec3 x = positions[index];
    const Vec3 v = velocities[index];
    const float r = radii[index];

    Vec3 vd = Vec3(0.0f, 0.0f, 0.0f);

#if USE_GRID
    // collide particles
    int cx = GridCoord(x.x, kInvCellEdge);
    int cy = GridCoord(x.y, kInvCellEdge);
    int cz = GridCoord(x.z, kInvCellEdge);

    for (int k=cz-1; k <= cz+1; ++k)
    {
        for (int j=cy-1; j <= cy+1; ++j)
        {
            for (int i=cx-1; i <= cx+1; ++i)
            {
                vd += CollideCell(index, i, j, k, cellStarts, cellEnds, indices, positions, velocities, radii, x, v, r, params.mBaumgarte, params.mFriction, params.mOverlap);
            }
        }
    }
#endif

    // collide planes
    for (int i=0; i < params.mNumPlanes; ++i)
    {
        Vec4 p = params.mPlanes[i];

        // distance to plane
        float d = x.x*p.x + x.y*p.y + x.z*p.z + p.w;

        float mtd = d - r;

        if (mtd < 0.0f)
        {
            // plane treated as immovable (mass 0 convention), fixed friction 0.8
            vd += CollisionImpulse(Vec3(0.0f, 0.0f, 0.0f), v, 0.0f, 1.0f, Vec3(p.x, p.y, p.z), mtd, params.mBaumgarte, 0.8f, params.mOverlap);
        }
    }

    // write back velocity
    newVelocities[index] = v + vd * scale;
}

// explicit Euler force integration with linear damping
__global__ void IntegrateForce(Vec3* velocities, Vec3 gravity, float damp, float dt)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;

    velocities[index] += (gravity - damp*velocities[index])*dt;
}

// commit the post-collision velocities and advance positions
__global__ void IntegrateVelocity(Vec3* positions, Vec3* velocities, const Vec3* newVelocities, float dt)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;

    // x += v*dt
    velocities[index] = newVelocities[index];
    positions[index] += velocities[index]*dt;
}

/*
__global__ void PrintCellCounts(uint32_t* cellStarts, uint32_t* cellEnds)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    printf("%d\n", cellEnds[index]-cellStarts[index]);
}
*/

//------------------------------------------------------------------

// Allocate all device buffers for a system of numGrains particles.
// NOTE(review): cudaMalloc return codes are not checked anywhere in this file.
GrainSystem* grainCreateSystem(int numGrains)
{
    GrainSystem* s = new GrainSystem();

    s->mNumGrains = numGrains;

    cudaMalloc(&s->mPositions, numGrains*sizeof(Vec3));
    cudaMalloc(&s->mVelocities, numGrains*sizeof(Vec3));
    cudaMalloc(&s->mNewVelocities, numGrains*sizeof(Vec3));

    cudaMalloc(&s->mRadii, numGrains*sizeof(float));

    cudaMalloc(&s->mSortedPositions, numGrains*sizeof(Vec3));
    cudaMalloc(&s->mSortedVelocities, numGrains*sizeof(Vec3));
    cudaMalloc(&s->mSortedRadii, numGrains*sizeof(float));

    // grid
#if USE_GRID
    cudaMalloc(&s->mCellStarts, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
    cudaMalloc(&s->mCellEnds, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
#endif

    cudaMalloc(&s->mCellIds, numGrains*sizeof(uint32_t));
    cudaMalloc(&s->mIndices, numGrains*sizeof(uint32_t));

    return s;
}

// Release every device buffer and the host-side struct.
void grainDestroySystem(GrainSystem* s)
{
    cudaFree(s->mPositions);
    cudaFree(s->mVelocities);
    cudaFree(s->mNewVelocities);

    cudaFree(s->mRadii);

    cudaFree(s->mSortedPositions);
    cudaFree(s->mSortedVelocities);
    cudaFree(s->mSortedRadii);

#if USE_GRID
    cudaFree(s->mCellStarts);
    cudaFree(s->mCellEnds);
#endif

    cudaFree(s->mCellIds);
    cudaFree(s->mIndices);

    delete s;
}

// Springs are not supported in this backend; kept for API compatibility.
void grainSetSprings(GrainSystem* s, const uint32_t* springIndices, const float* springLengths, uint32_t numSprings)
{
    /*
    s->mSpringIndices = (uint32_t*)malloc(numSprings*2*sizeof(uint32_t));
    s->mSpringLengths = (float*)malloc(numSprings*sizeof(float));

    memcpy(s->mSpringIndices, springIndices, numSprings*2*sizeof(uint32_t));
    memcpy(s->mSpringLengths, springLengths, numSprings*sizeof(float));

    s->mNumSprings = numSprings;
    */
}

// Upload n particle positions (p is interpreted as packed Vec3 data).
void grainSetPositions(GrainSystem* s, float* p, int n)
{
    cudaMemcpy(&s->mPositions[0], p, sizeof(Vec3)*n, cudaMemcpyHostToDevice);
}

// Upload n particle velocities.
void grainSetVelocities(GrainSystem* s, float* v, int n)
{
    cudaMemcpy(&s->mVelocities[0], v, sizeof(Vec3)*n, cudaMemcpyHostToDevice);
}

// Upload one radius per particle (mNumGrains values).
void grainSetRadii(GrainSystem* s, float* r)
{
    cudaMemcpy(&s->mRadii[0], r, sizeof(float)*s->mNumGrains, cudaMemcpyHostToDevice);
}

// Download all particle positions to host memory.
void grainGetPositions(GrainSystem* s, float* p)
{
    cudaMemcpy(p, &s->mPositions[0], sizeof(Vec3)*s->mNumGrains, cudaMemcpyDeviceToHost);
}

// Download all particle velocities to host memory.
void grainGetVelocities(GrainSystem* s, float* v)
{
    cudaMemcpy(v, &s->mVelocities[0], sizeof(Vec3)*s->mNumGrains, cudaMemcpyDeviceToHost);
}

// Download all particle radii to host memory.
void grainGetRadii(GrainSystem* s, float* r)
{
    cudaMemcpy(r, &s->mRadii[0], sizeof(float)*s->mNumGrains, cudaMemcpyDeviceToHost);
}

// Copy simulation parameters into the host-side struct (passed by value to kernels).
void grainSetParams(GrainSystem* s, GrainParams* params)
{
    //cudaMemcpy(s->mParams, params, sizeof(GrainParams), cudaMemcpyHostToDevice);
    s->mParams = *params;
}

// Advance the simulation by dt, sub-stepped into 'iterations' solver passes.
// Each pass: hash particles to cells, sort, build the grid, reorder, integrate
// forces, resolve collisions, integrate velocities, then ping-pong the buffers.
// FIX: removed a stray trailing #endif that matched no #if/#ifdef in this file
// (every conditional above is balanced), which made the translation unit fail
// to preprocess.
void grainUpdateSystem(GrainSystem* s, float dt, int iterations, GrainTimers* timers)
{
    //iterations = 10;
    dt /= iterations;

    const int kNumThreadsPerBlock = 128;
    // NOTE(review): floor division — particles beyond the last full block are never
    // processed; mNumGrains is assumed to be a multiple of kNumThreadsPerBlock
    const int kNumBlocks = s->mNumGrains / kNumThreadsPerBlock;

    GrainParams params = s->mParams;
    params.mBaumgarte /= dt;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaFuncSetCacheConfig(CreateCellIndices, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(CreateGrid, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(ReorderParticles, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(IntegrateForce, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(IntegrateVelocity, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(Collide, cudaFuncCachePreferL1);

    for (int i=0; i < iterations; ++i)
    {
        {
            CudaTimer timer("CreateCellIndices", start, stop, timers->mCreateCellIndices);
            CreateCellIndices<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mCellIds, s->mIndices);
        }

        {
            CudaTimer timer("SortCellIndices", start, stop, timers->mSortCellIndices);
            SortCellIndices(s->mCellIds, s->mIndices, s->mNumGrains);
        }

#if USE_GRID
        {
            CudaTimer timer("CreateGrid", start, stop, timers->mCreateGrid);
            // zeroed starts/ends mean "empty cell" (start == end == 0)
            cudaMemset(s->mCellStarts, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);
            cudaMemset(s->mCellEnds, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);

            CreateGrid<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mCellIds, s->mCellStarts, s->mCellEnds, s->mNumGrains);
        }
#endif

        {
            CudaTimer timer("ReorderParticles", start, stop, timers->mReorder);
            ReorderParticles<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mVelocities, s->mRadii, s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mIndices);
        }

        //PrintCellCounts<<<kGridDim*kGridDim/kNumThreadsPerBlock, kNumThreadsPerBlock>>>(s->mCellStarts, s->mCellEnds);

        {
            float t;
            CudaTimer timer("Integrate Force", start, stop, t);
            IntegrateForce<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedVelocities, s->mParams.mGravity, s->mParams.mDamp, dt);
        }

        {
            CudaTimer timer("Collide", start, stop, timers->mCollide);

            float scale = 1;//float(i+1)/(iterations);

            Collide<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mCellStarts, s->mCellEnds, s->mIndices, s->mNewVelocities, s->mNumGrains, params, dt, scale);
        }

        {
            CudaTimer timer("Integrate", start, stop, timers->mIntegrate);
            IntegrateVelocity<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mNewVelocities, dt);
        }

        // ping-pong: the sorted buffers become the canonical state for the next pass
        swap(s->mSortedPositions, s->mPositions);
        swap(s->mSortedVelocities, s->mVelocities);
        swap(s->mSortedRadii, s->mRadii);
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
the_stack
namespace lightseq { namespace cuda { /** @brief: ker_split_multilg_request the format of request in multilingual: e.g. <en> <de> <hello> <world> <.> request shape: [batch_size, src_seq_len + 2] request = numpy.concatenate((src_lang_id, trg_lang_id, src_token_id), axis=1) @thread gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS blockDim.x = MAX_THREADS @param req: [batch_size, src_seq_len + 2, hidden_dim] src_lang_id: [batch_size] trg_lang_id: [batch_size] src_token_id: [batch_size, src_seq_len, hidden_dim] req_len: src_seq_len + 2 */ __global__ void ker_split_multilg_request(const int *req, int *src_lang_id, int *trg_lang_id, int *src_token_id, int batch_size, int req_len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < batch_size * req_len) { int value = req[idx]; int seq_id = idx / req_len; int token_id = idx % req_len; if (token_id == 0) { src_lang_id[seq_id] = value; } else if (token_id == 1) { trg_lang_id[seq_id] = value; } else { int new_idx = flat_2dim(seq_id, token_id - 2, req_len - 2); src_token_id[new_idx] = value; } } } void launch_split_multilg_request(const int *req, int *src_lang_id, int *trg_lang_id, int *src_token_id, int batch_size, int req_len, cudaStream_t &stream) { if (req_len < 3) { throw std::runtime_error("req_len should be greater than 2"); } int nele = batch_size * req_len; int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS; ker_split_multilg_request<<<nblock, MAX_THREADS, 0, stream>>>( req, src_lang_id, trg_lang_id, src_token_id, batch_size, req_len); } /** @brief: ker_enc_emb for encoder, look up token embedding, add position embedding @thread gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS blockDim.x = MAX_THREADS; @param token_emb: [vocab_size, hidden_dim] pos_emb: [max_step, hidden_dim] tokens: input token id, [batch_size, seq_len] output: result, [batch_size, seq_len, hidden_dim] pad_mask: record the padding token, [batch_size, seq_len] pad_id, the padding token id */ template <typename T> __global__ void 
ker_enc_emb(const T *token_emb, const T *pos_emb, const int *tokens, T *output, int *pad_mask, int pad_id, int batch_size, int seq_len, int hidden_dim) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size * seq_len * hidden_dim) { return; } int batch_idx, seq_idx, dim_idx; decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); int tokens_idx = batch_idx * seq_len + seq_idx; int token = tokens[tokens_idx]; float4 value; if (token == pad_id) { if (dim_idx == 0) { pad_mask[tokens_idx] = 1; } value.x = 0.f; value.y = 0.f; value.z = 0.f; value.w = 0.f; } else { if (dim_idx == 0) { pad_mask[tokens_idx] = 0; } value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; value.x += pemb.x; value.y += pemb.y; value.z += pemb.z; value.w += pemb.w; } ((float4 *)output)[idx] = value; } template <> __global__ void ker_enc_emb<__half>(const __half *token_emb, const __half *pos_emb, const int *tokens, __half *output, int *pad_mask, int pad_id, int batch_size, int seq_len, int hidden_dim) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size * seq_len * hidden_dim) { return; } int batch_idx, seq_idx, dim_idx; decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); int tokens_idx = batch_idx * seq_len + seq_idx; int token = tokens[tokens_idx]; float4 value; if (token == pad_id) { if (dim_idx == 0) { pad_mask[tokens_idx] = 1; } value.x = 0.f; value.y = 0.f; value.z = 0.f; value.w = 0.f; } else { if (dim_idx == 0) { pad_mask[tokens_idx] = 0; } value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; __half2 *value_h2 = (__half2 *)(&value); __half2 *pemb_h2 = (__half2 *)(&pemb); #pragma unroll for (int i = 0; i < 4; i++) { float2 value_f2 = __half22float2(value_h2[i]); float2 pemb_f2 = __half22float2(pemb_h2[i]); value_f2.x += pemb_f2.x; value_f2.y += pemb_f2.y; value_h2[i] 
= __float22half2_rn(value_f2); } } ((float4 *)output)[idx] = value; }

/**
@brief: ker_enc_emb_multilg_token
for encoder, look up token embedding, add position embedding and a
per-sample language embedding (token-level multilingual variant)

@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS;

@param
token_emb: [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, seq_len]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, seq_len, hidden_dim]
pad_mask: record the padding token, [batch_size, seq_len]
pad_id, the padding token id

NOTE(review): hidden_dim here counts float4 elements — launch_enc_emb divides
the real hidden size by 4 (float) or 8 (__half) before launching.
*/
template <typename T>
__global__ void ker_enc_emb_multilg_token(const T *token_emb, const T *pos_emb,
                                          const int *tokens, const T *lang_emb,
                                          const int *lang_id, T *output,
                                          int *pad_mask, int pad_id,
                                          int batch_size, int seq_len,
                                          int hidden_dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= batch_size * seq_len * hidden_dim) {
    return;
  }
  int batch_idx, seq_idx, dim_idx;
  decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
  int tokens_idx = batch_idx * seq_len + seq_idx;
  int token = tokens[tokens_idx];
  float4 value;

  if (token == pad_id) {
    // padding position: zero embedding; write the mask once per token
    // (only the thread handling dim_idx == 0 touches pad_mask)
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 1;
    }
    value.x = 0.f;
    value.y = 0.f;
    value.z = 0.f;
    value.w = 0.f;
  } else {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 0;
    }
    value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
    // add pos emb
    float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
    value.x += pemb.x;
    value.y += pemb.y;
    value.z += pemb.z;
    value.w += pemb.w;
    // add lang emb
    pemb = ((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx];
    value.x += pemb.x;
    value.y += pemb.y;
    value.z += pemb.z;
    value.w += pemb.w;
  }
  ((float4 *)output)[idx] = value;
}

// __half specialization: each float4 load carries 8 halves; arithmetic is
// done in float via __half2 <-> float2 conversion, rounded to nearest on store.
template <>
__global__ void ker_enc_emb_multilg_token<__half>(
    const __half *token_emb, const __half *pos_emb, const int *tokens,
    const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask,
    int pad_id, int batch_size, int seq_len, int hidden_dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= batch_size * seq_len * hidden_dim) {
    return;
  }
  int batch_idx, seq_idx, dim_idx;
  decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
  int tokens_idx = batch_idx * seq_len + seq_idx;
  int token = tokens[tokens_idx];
  float4 value;

  if (token == pad_id) {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 1;
    }
    value.x = 0.f;
    value.y = 0.f;
    value.z = 0.f;
    value.w = 0.f;
  } else {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 0;
    }
    value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
    __half2 *value_h2 = (__half2 *)(&value);
    float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
    __half2 *pemb_h2 = (__half2 *)(&pemb);
    float4 lemb =
        ((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx];
    __half2 *lemb_h2 = (__half2 *)(&lemb);
#pragma unroll
    for (int i = 0; i < 4; i++) {
      float2 value_f2 = __half22float2(value_h2[i]);
      float2 pemb_f2 = __half22float2(pemb_h2[i]);
      float2 lemb_f2 = __half22float2(lemb_h2[i]);
      value_f2.x += pemb_f2.x + lemb_f2.x;
      value_f2.y += pemb_f2.y + lemb_f2.y;
      value_h2[i] = __float22half2_rn(value_f2);
    }
  }
  ((float4 *)output)[idx] = value;
}

/**
@brief: ker_enc_emb_multilg_sentence
for encoder, sentence-level multilingual: position 0 of every sample is
replaced by that sample's language embedding (as a pseudo-token) and the real
tokens fill positions 1..seq_len-1; position embedding is added as usual

@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS;

@param
token_emb: [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, seq_len]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, seq_len, hidden_dim]
pad_mask: record the padding token, [batch_size, seq_len]
pad_id, the padding token id

NOTE(review): the indexing tokens[batch_idx * (seq_len - 1) + seq_idx - 1]
implies tokens is effectively laid out as [batch_size, seq_len - 1] — confirm
against the caller.
*/
template <typename T>
__global__ void ker_enc_emb_multilg_sentence(
    const T *token_emb, const T *pos_emb, const int *tokens, const T *lang_emb,
    const int *lang_id, T *output, int *pad_mask, int pad_id, int batch_size,
    int seq_len, int hidden_dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= batch_size * seq_len * hidden_dim) {
    return;
  }
  int batch_idx, seq_idx, dim_idx;
  decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);

  bool is_pad;
  int token_emb_idx;
  if (seq_idx == 0) {
    // first position: read from the language-embedding table instead
    is_pad = false;
    token_emb = lang_emb;
    token_emb_idx = lang_id[batch_idx];
  } else {
    token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1];
    is_pad = (token_emb_idx == pad_id);
  }
  float4 value;
  int tokens_idx = batch_idx * seq_len + seq_idx;
  if (is_pad) {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 1;
    }
    value.x = 0.f;
    value.y = 0.f;
    value.z = 0.f;
    value.w = 0.f;
  } else {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 0;
    }
    value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx];
    float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
    value.x += pemb.x;
    value.y += pemb.y;
    value.z += pemb.z;
    value.w += pemb.w;
  }
  ((float4 *)output)[idx] = value;
}

// __half specialization of the sentence-level multilingual embedding kernel.
template <>
__global__ void ker_enc_emb_multilg_sentence<__half>(
    const __half *token_emb, const __half *pos_emb, const int *tokens,
    const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask,
    int pad_id, int batch_size, int seq_len, int hidden_dim) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= batch_size * seq_len * hidden_dim) {
    return;
  }
  int batch_idx, seq_idx, dim_idx;
  decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);

  bool is_pad;
  int token_emb_idx;
  if (seq_idx == 0) {
    is_pad = false;
    token_emb = lang_emb;
    token_emb_idx = lang_id[batch_idx];
  } else {
    token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1];
    is_pad = (token_emb_idx == pad_id);
  }
  float4 value;
  int tokens_idx = batch_idx * seq_len + seq_idx;
  if (is_pad) {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 1;
    }
    value.x = 0.f;
    value.y = 0.f;
    value.z = 0.f;
    value.w = 0.f;
  } else {
    if (dim_idx == 0) {
      pad_mask[tokens_idx] = 0;
    }
    value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx];
    float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
    __half2 *value_h2 = (__half2 *)(&value);
    __half2 *pemb_h2 = (__half2 *)(&pemb);
#pragma unroll
    for (int i = 0; i < 4; i++) {
      float2 value_f2 = __half22float2(value_h2[i]);
      float2 pemb_f2 = __half22float2(pemb_h2[i]);
      value_f2.x += pemb_f2.x;
      value_f2.y += pemb_f2.y;
      value_h2[i] = __float22half2_rn(value_f2);
    }
  }
  ((float4 *)output)[idx] = value;
}

// Host launcher for the encoder embedding kernels (float path).
// Requires hidden_dim % 4 == 0; hidden_dim is divided by 4 so each thread
// handles one float4. multilg_type: 0 = none, 1 = token-level, else
// sentence-level.
template <typename T>
void launch_enc_emb(const T *token_emb, const T *pos_emb, const int *tokens,
                    T *output, int *pad_mask, int pad_id, int batch_size,
                    int seq_len, int hidden_dim, cudaStream_t stream,
                    const T *lang_emb, const int *lang_id, int multilg_type) {
  if (hidden_dim % 4 != 0) {
    throw std::runtime_error("violate hidden_dim % 4 = 0");
  }
  hidden_dim >>= 2;
  int nele = batch_size * seq_len * hidden_dim;
  int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
  if (multilg_type == 0) {
    ker_enc_emb<T><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size,
        seq_len, hidden_dim);
  } else if (multilg_type == 1) {
    ker_enc_emb_multilg_token<T><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask,
        pad_id, batch_size, seq_len, hidden_dim);
  } else {
    ker_enc_emb_multilg_sentence<T><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask,
        pad_id, batch_size, seq_len, hidden_dim);
  }
}

// __half launcher: requires hidden_dim % 8 == 0 (8 halves per float4).
template <>
void launch_enc_emb<__half>(const __half *token_emb, const __half *pos_emb,
                            const int *tokens, __half *output, int *pad_mask,
                            int pad_id, int batch_size, int seq_len,
                            int hidden_dim, cudaStream_t stream,
                            const __half *lang_emb, const int *lang_id,
                            int multilg_type) {
  if (hidden_dim % 8 != 0) {
    throw std::runtime_error("violate hidden_dim % 8 = 0");
  }
  hidden_dim >>= 3;
  int nele = batch_size * seq_len * hidden_dim;
  int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
  if (multilg_type == 0) {
    ker_enc_emb<__half><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size,
        seq_len, hidden_dim);
  } else if (multilg_type == 1) {
    ker_enc_emb_multilg_token<__half><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask,
        pad_id, batch_size, seq_len, hidden_dim);
  } else {
    ker_enc_emb_multilg_sentence<__half><<<nblock, MAX_THREADS, 0, stream>>>(
        token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask,
        pad_id, batch_size, seq_len, hidden_dim);
  }
}

template void launch_enc_emb<float>(const float *token_emb,
                                    const float *pos_emb, const int *tokens,
                                    float *output, int *pad_mask, int pad_id,
                                    int batch_size, int seq_len,
                                    int hidden_dim, cudaStream_t stream,
                                    const float *lang_emb, const int *lang_id,
                                    int multilg_type);

template void launch_enc_emb<__half>(const __half *token_emb,
                                     const __half *pos_emb, const int *tokens,
                                     __half *output, int *pad_mask, int pad_id,
                                     int batch_size, int seq_len,
                                     int hidden_dim, cudaStream_t stream,
                                     const __half *lang_emb,
                                     const int *lang_id, int multilg_type);

/**
@brief: ker_dec_emb
for decoder, look up token embedding, add position embedding
(and optionally a language embedding)

@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS;
blockDim.x = MAX_THREADS

@param
token_emb: [hidden_dim, vocab_size], note, it is different with encoder
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, beam_size, max_step]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, beam_size, hidden_dim]
step: current decoder step
max_step: max decoder steps
multilg_type: 0 for no multilg, 1 for token level multilg,
2 for sentence level multilg
*/
template <typename T>
__global__ void ker_dec_emb(const T *token_emb, const T *pos_emb, int *tokens,
                            const T *lang_emb, const int *lang_id, T *output,
                            int batch_size, int beam_size, int hidden_dim,
                            int vocab_size, int step, int max_step,
                            int multilg_type) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= batch_size * beam_size * hidden_dim) {
    return;
  }
  int batch_idx, beam_idx, dim_idx;
  decompose_3dim(idx, beam_size, hidden_dim, &batch_idx, &beam_idx, &dim_idx);

  T emb;
  if ((multilg_type == 2 || multilg_type == 3) && step == 0) {
    // the bos of sentense level multilg is target lang id;
    // also records the lang id back into the token buffer at step 0
    int lid = lang_id[batch_idx];
    emb = lang_emb[flat_2dim(lid, dim_idx, hidden_dim)];
    tokens[flat_3dim(batch_idx, beam_idx, 0, beam_size, max_step)] = lid;
  } else {
    int token =
        tokens[flat_3dim(batch_idx, beam_idx, step, beam_size, max_step)];
    // token_emb is transposed ([hidden_dim, vocab_size]) relative to encoder
    emb = token_emb[flat_2dim(dim_idx, token, vocab_size)];
  }
  float value =
      float(emb) + float(pos_emb[flat_2dim(step, dim_idx, hidden_dim)]);
  if (multilg_type == 1) {
    // token level multilg, add lang_emb
    value +=
        float(lang_emb[flat_2dim(lang_id[batch_idx], dim_idx, hidden_dim)]);
  }
  output[idx] = T(value);
}

// Host launcher for ker_dec_emb; validates step < max_step.
template <typename T>
void launch_dec_emb(const T *token_emb, const T *pos_emb, int *tokens,
                    const T *lang_emb, const int *lang_id, T *output,
                    int batch_size, int beam_size, int hidden_dim,
                    int vocab_size, int step, int max_step, int multilg_type,
                    cudaStream_t stream) {
  if (step >= max_step) {
    throw std::runtime_error("violate step < max_step");
  }
  int nele = batch_size * beam_size * hidden_dim;
  int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
  ker_dec_emb<T><<<nblock, MAX_THREADS, 0, stream>>>(
      token_emb, pos_emb, tokens, lang_emb, lang_id, output, batch_size,
      beam_size, hidden_dim, vocab_size, step, max_step, multilg_type);
}

template void launch_dec_emb<float>(const float *token_emb,
                                    const float *pos_emb, int *tokens,
                                    const float *lang_emb, const int *lang_id,
                                    float *output, int batch_size,
                                    int beam_size, int hidden_dim,
                                    int vocab_size, int step, int max_step,
                                    int multilg_type, cudaStream_t stream);

template void launch_dec_emb<__half>(const __half *token_emb,
                                     const __half *pos_emb, int *tokens,
                                     const __half *lang_emb,
                                     const int *lang_id, __half *output,
                                     int batch_size, int beam_size,
                                     int hidden_dim, int vocab_size, int step,
                                     int max_step, int multilg_type,
                                     cudaStream_t stream);

/**
@brief: ker_patch_emb
patch embedding by conv2d, concat cls embedding, add position embedding

@thread
gridDim.x = batch_size
gridDim.y = max_step
gridDim.z = hidden_dim
blockDim.x = MAX_THREADS

@param
conv_weight: [hidden_dim, channel_input, patch_size, patch_size]
conv_bias: [hidden_dim]
pos_emb: [max_step, hidden_dim]
cls_emb: [hidden_dim]
input: [batch_size, channel_input, image_size, image_size]
output: result, [batch_size, max_step, hidden_dim]
*/
template <typename T>
__global__ void ker_patch_emb(const T *conv_weight, const T *conv_bias,
                              const T *pos_emb, const T *cls_emb,
                              const float *input, T *output, int patch_size,
                              int image_size, int channel_input) {
  if (blockIdx.y == 0) {
    // step 0 is the [CLS] position: cls embedding + pos_emb row 0
    if (threadIdx.x == 0) {
      output[flat_3dim(blockIdx.x, 0, blockIdx.z, gridDim.y, gridDim.z)] =
          __ldg(&cls_emb[blockIdx.z]) + __ldg(&pos_emb[blockIdx.z]);
    }
    return;
  }

  // one block computes one (sample, patch, output-channel) conv dot-product
  int val_num_per_block = channel_input * patch_size * patch_size;
  int patch_row_id, patch_col_id, value_row_id, value_col_id, channel_id;
  decompose_2dim(blockIdx.y - 1, image_size / patch_size, &patch_row_id,
                 &patch_col_id);

  float val = 0.f;
  for (int idx = threadIdx.x; idx < val_num_per_block; idx += blockDim.x) {
    decompose_3dim(idx, patch_size, patch_size, &channel_id, &value_row_id,
                   &value_col_id);
    int conv_weight_offset = flat_2dim(blockIdx.z, idx, val_num_per_block);
    int in_offset = flat_4dim(blockIdx.x, channel_id,
                              patch_row_id * patch_size + value_row_id,
                              patch_col_id * patch_size + value_col_id,
                              channel_input, image_size, image_size);
    val += __ldg(&input[in_offset]) *
           (float)__ldg(&conv_weight[conv_weight_offset]);
  }

  float rsum = blockReduceSum(val);
  if (threadIdx.x == 0) {
    float out_float;
    int out_offset =
        flat_3dim(blockIdx.x, blockIdx.y, blockIdx.z, gridDim.y, gridDim.z);
    out_float =
        rsum + (float)__ldg(&conv_bias[blockIdx.z]) +
        (float)__ldg(&pos_emb[flat_2dim(blockIdx.y, blockIdx.z, gridDim.z)]);
    output[out_offset] = (T)out_float;
  }
}

// Host launcher: one block per (sample, step, hidden-channel) triple.
template <typename T>
void launch_patch_emb(const T *conv_weight, const T *conv_bias,
                      const T *pos_emb, const T *cls_emb, const float *input,
                      T *output, int patch_size, int image_size,
                      int batch_size, int max_step, int hidden_dim,
                      int channel_input, cudaStream_t stream) {
  ker_patch_emb<T>
      <<<dim3(batch_size, max_step, hidden_dim), MAX_THREADS, 0, stream>>>(
          conv_weight, conv_bias, pos_emb, cls_emb, input, output, patch_size,
          image_size, channel_input);
}

template void launch_patch_emb<float>(
    const float *conv_weight, const float *conv_bias, const float *pos_emb,
    const float *cls_emb, const float *input, float *output, int patch_size,
    int image_size, int batch_size, int max_step, int hidden_dim,
    int channel_input, cudaStream_t stream);

template void launch_patch_emb<__half>(
    const __half *conv_weight, const __half *conv_bias, const __half *pos_emb,
    const __half *cls_emb, const float *input, __half *output, int patch_size,
    int image_size, int batch_size, int max_step, int hidden_dim,
    int channel_input, cudaStream_t stream);

}  // namespace cuda
}  // namespace lightseq
the_stack
// Device-side statistics over Tensor / FrameBuf storage: NaN detection,
// min, max and raw moments (m0..m4).
// NOTE(review): the "kernal" / "IsnNan" spellings are part of the exported
// interface and are kept as-is.
#include <algorithm>
#include <limits>

#include <nppdefs.h>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"

#include "Common.cuh"


// ---------------------------------
//  IsNan
// ---------------------------------

// One thread per element; any thread seeing a NaN sets the shared flag to 1.
// All writers store the same value, so the race is benign.
template<typename T>
__global__ void kernal_Tensor_IsnNan
        (
            int         *result,
            const T     *buf,
            int         size
        )
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if ( index >= size ) {
        return;
    }
    if ( isnan(buf[index]) ) {
        result[0] = 1;
    }
}

// Host wrapper: clears the flag, then scans dev_buf[0..size) for NaN.
// dev_result holds 0/1 after stream completion.
template<typename T>
BBCU_DLL_EXPORT int bbcu_Tensor_IsnNan
        (
            int             *dev_result,
            T const         *dev_buf,
            int             size,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(int)));

    dim3    block(1024);
    dim3    grid((size+1023)/1024);
    kernal_Tensor_IsnNan<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            size
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_Tensor_IsnNan<float> (int *, float const *, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_Tensor_IsnNan<double>(int *, double const *, int, cudaStream_t);


// 2D variant: x indexes frames, y indexes nodes; rows are frame_stride apart.
template<typename T>
__global__ void kernal_FrameBuf_IsnNan
        (
            int         *result,
            const T     *buf,
            int         node_size,
            int         frame_size,
            int         frame_stride
        )
{
    int frame = blockDim.x * blockIdx.x + threadIdx.x;
    int node  = blockDim.y * blockIdx.y + threadIdx.y;
    if (frame >= frame_size || node >= node_size) {
        return;
    }
    if ( isnan(buf[frame_stride*node + frame]) ) {
        *result = 1;
    }
}

// Host wrapper: picks a 2D block <= 1024 threads by repeatedly halving
// (y first, then x), then launches a covering grid.
template<typename T>
BBCU_DLL_EXPORT int bbcu_FrameBuf_IsnNan
        (
            int             *dev_result,
            const T         *dev_buf,
            int             node_size,
            int             frame_size,
            int             frame_stride,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(int)));

    dim3    block;
    dim3    grid;
    block.x = std::min(frame_size, 1024);
    block.y = std::min(node_size, 1024);
    while (block.y > 1 && block.x * block.y > 1024) { block.y = (block.y + 1) / 2; }
    while (block.x > 1 && block.x * block.y > 1024) { block.x = (block.x + 1) / 2; }
    grid.x = (frame_size + (block.x - 1)) / block.x;
    grid.y = (node_size  + (block.y - 1)) / block.y;
    kernal_FrameBuf_IsnNan<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            node_size,
            frame_size,
            frame_stride
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_FrameBuf_IsnNan<float> (int *, float const *, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_FrameBuf_IsnNan<double>(int *, double const *, int, int, int, cudaStream_t);


// ---------------------------------
//  min
// ---------------------------------

// Single-warp reduction: each lane strides over the buffer, then
// device_ShuffleMin combines lane-local minima. Launched with block(32),
// grid(1) by the host wrapper below.
template<typename T>
__global__ void kernal_Tensor_Min
        (
            T           *result,
            const T     *buf,
            int         size
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    T value = bb_type_max<T>();
    for (int index = id; index < size; index += step) {
        value = min(value, buf[index]);
    }
    value = device_ShuffleMin(value);
    if (id == 0) {
        *result = value;
    }
}

template<typename T>
BBCU_DLL_EXPORT int bbcu_Tensor_Min
        (
            T               *dev_result,
            T const         *dev_buf,
            int             size,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

//  BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(T)));

    dim3    block(32);
    dim3    grid(1);
    kernal_Tensor_Min<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            size
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_Tensor_Min<float> (float *, float const *, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_Tensor_Min<double>(double *, double const *, int, cudaStream_t);


// FrameBuf variant: iterates all nodes, strides frames across the warp.
template<typename T>
__global__ void kernal_FrameBuf_Min
        (
            T           *result,
            T const     *buf,
            int         node_size,
            int         frame_size,
            int         frame_stride
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    T value = bb_type_max<T>();
    for (int node = 0; node < node_size; node++) {
        for (int frame = id; frame < frame_size; frame += step) {
            value = min(value, buf[node*frame_stride + frame]);
        }
    }
    value = device_ShuffleMin(value);
    if (id == 0) {
        *result = value;
    }
}

template<typename T>
BBCU_DLL_EXPORT int bbcu_FrameBuf_Min
        (
            T               *dev_result,
            T const         *dev_buf,
            int             node_size,
            int             frame_size,
            int             frame_stride,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

//  BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(int)));

    dim3    block(32);
    dim3    grid(1);
    kernal_FrameBuf_Min<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            node_size,
            frame_size,
            frame_stride
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_FrameBuf_Min<float> (float *, float const *, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_FrameBuf_Min<double>(double *, double const *, int, int, int, cudaStream_t);


// ---------------------------------
//  max
// ---------------------------------

// Mirror of the min kernels, seeded with the type's lowest value.
template<typename T>
__global__ void kernal_Tensor_Max
        (
            T           *result,
            const T     *buf,
            int         size
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    T value = bb_type_lowest<T>();
    for (int index = id; index < size; index += step) {
        value = max(value, buf[index]);
    }
    value = device_ShuffleMax(value);
    if (id == 0) {
        *result = value;
    }
}

template<typename T>
BBCU_DLL_EXPORT int bbcu_Tensor_Max
        (
            T               *dev_result,
            T const         *dev_buf,
            int             size,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

//  BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(T)));

    dim3    block(32);
    dim3    grid(1);
    kernal_Tensor_Max<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            size
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_Tensor_Max<float> (float *, float const *, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_Tensor_Max<double>(double *, double const *, int, cudaStream_t);


template<typename T>
__global__ void kernal_FrameBuf_Max
        (
            T           *result,
            T const     *buf,
            int         node_size,
            int         frame_size,
            int         frame_stride
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    T value = bb_type_lowest<T>();
    for (int node = 0; node < node_size; node++) {
        for (int frame = id; frame < frame_size; frame += step) {
            value = max(value, buf[node*frame_stride + frame]);
        }
    }
    value = device_ShuffleMax(value);
    if (id == 0) {
        *result = value;
    }
}

template<typename T>
BBCU_DLL_EXPORT int bbcu_FrameBuf_Max
        (
            T               *dev_result,
            T const         *dev_buf,
            int             node_size,
            int             frame_size,
            int             frame_stride,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

//  BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(int)));

    dim3    block(32);
    dim3    grid(1);
    kernal_FrameBuf_Max<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            node_size,
            frame_size,
            frame_stride
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_FrameBuf_Max<float> (float *, float const *, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_FrameBuf_Max<double>(double *, double const *, int, int, int, cudaStream_t);


// ---------------------------------
//  momnet
// ---------------------------------

// Accumulates raw moment sums m0..m4 plus min/max over non-NaN elements,
// warp-reduced into result[0..6]:
//   result[0..4] = sum(v^0) .. sum(v^4), result[5] = min, result[6] = max.
template<typename T>
__global__ void kernal_Tensor_Moment
        (
            double      *result,
            const T     *buf,
            int         size
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    double m0_sum = 0;
    double m1_sum = 0;
    double m2_sum = 0;
    double m3_sum = 0;
    double m4_sum = 0;
    T min_val = bb_type_max<T>();
    T max_val = bb_type_lowest<T>();
    for (int index = id; index < size; index += step) {
        T v = buf[index];
        if ( ! isnan(v) ) {     // NaN elements are excluded from every statistic
            min_val = min(min_val, v);
            max_val = max(max_val, v);
            double m0 = 1.0;
            double m1 = (double)v;
            double m2 = m1*m1;
            double m3 = m2*m1;
            double m4 = m2*m2;
            m0_sum += m0;
            m1_sum += m1;
            m2_sum += m2;
            m3_sum += m3;
            m4_sum += m4;
        }
    }
    m0_sum = device_ShuffleSum(m0_sum);
    m1_sum = device_ShuffleSum(m1_sum);
    m2_sum = device_ShuffleSum(m2_sum);
    m3_sum = device_ShuffleSum(m3_sum);
    m4_sum = device_ShuffleSum(m4_sum);
    min_val = device_ShuffleMin(min_val);
    max_val = device_ShuffleMax(max_val);
    if (id == 0) {
        result[0] = m0_sum;
        result[1] = m1_sum;
        result[2] = m2_sum;
        result[3] = m3_sum;
        result[4] = m4_sum;
        result[5] = (double)min_val;
        result[6] = (double)max_val;
    }
}

// Host wrapper; dev_result must hold at least 7 doubles.
template<typename T>
BBCU_DLL_EXPORT int bbcu_Tensor_Moment
        (
            double          *dev_result,
            T const         *dev_buf,
            int             size,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    dim3    block(32);
    dim3    grid(1);
    kernal_Tensor_Moment<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            size
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_Tensor_Moment<float> (double *, float const *, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_Tensor_Moment<double>(double *, double const *, int, cudaStream_t);


template<typename T>
__global__ void kernal_FrameBuf_Moment
        (
            double      *result,
            T const     *buf,
            int         node_size,
            int         frame_size,
            int         frame_stride
        )
{
    int id   = threadIdx.x;
    int step = blockDim.x;
    double m0_sum = 0;
    double m1_sum = 0;
    double m2_sum = 0;
    double m3_sum = 0;
    double m4_sum = 0;
    T min_val = bb_type_max<T>();
    T max_val = bb_type_lowest<T>();
    for (int node = 0; node < node_size; node++) {
        for (int frame = id; frame < frame_size; frame += step) {
            T v = buf[node*frame_stride + frame];
            if ( ! isnan(v) ) {
                min_val = min(min_val, v);
                max_val = max(max_val, v);
                double m0 = 1.0;
                double m1 = (double)v;
                double m2 = m1*m1;
                double m3 = m2*m1;
                double m4 = m2*m2;
                m0_sum += m0;
                m1_sum += m1;
                m2_sum += m2;
                m3_sum += m3;
                m4_sum += m4;
            }
        }
    }
    m0_sum = device_ShuffleSum(m0_sum);
    m1_sum = device_ShuffleSum(m1_sum);
    m2_sum = device_ShuffleSum(m2_sum);
    m3_sum = device_ShuffleSum(m3_sum);
    m4_sum = device_ShuffleSum(m4_sum);
    min_val = device_ShuffleMin(min_val);
    max_val = device_ShuffleMax(max_val);
    if (id == 0) {
        result[0] = m0_sum;
        result[1] = m1_sum;
        result[2] = m2_sum;
        result[3] = m3_sum;
        result[4] = m4_sum;
        result[5] = (double)min_val;
        result[6] = (double)max_val;
    }
}

template<typename T>
BBCU_DLL_EXPORT int bbcu_FrameBuf_Moment
        (
            double          *dev_result,
            T const         *dev_buf,
            int             node_size,
            int             frame_size,
            int             frame_stride,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

//  BB_CUDA_SAFE_CALL(cudaMemset(dev_result, 0, sizeof(int)));

    dim3    block(32);
    dim3    grid(1);
    kernal_FrameBuf_Moment<T><<<grid, block, 0, streamId>>>(
            dev_result,
            dev_buf,
            node_size,
            frame_size,
            frame_stride
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}

template BBCU_DLL_EXPORT int bbcu_FrameBuf_Moment<float> (double *, float const *, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_FrameBuf_Moment<double>(double *, double const *, int, int, int, cudaStream_t);


// end of file
the_stack
#include "dali/kernels/normalize/normalize_gpu.h"  // NOLINT
#include "dali/kernels/normalize/normalize_gpu_impl.cuh"  // NOLINT
#include <gtest/gtest.h>
#include <cmath>
#include <initializer_list>
#include <iostream>
#include <random>
#include <utility>
#include "dali/core/cuda_event.h"
#include "dali/kernels/kernel_manager.h"
#include "dali/test/device_test.h"
#include "dali/test/test_tensors.h"
#include "dali/test/tensor_test_utils.h"

namespace dali {
namespace kernels {

// Recursive per-dimension reference normalization:
//   out = (in - base) * mul * global_scale + shift
// where mul is either `scale` directly or a regularized inverse stddev
// (rsqrt(scale^2 + epsilon)) when calc_inv_stddev is set.
// base/scale extents of 1 are broadcast (their position index does not
// advance along that axis).
template <bool calc_inv_stddev, typename Out, typename In>
void RefNormalize(
    const OutTensorCPU<Out> &out, const InTensorCPU<In> &in,
    const InTensorCPU<float> &base, const InTensorCPU<float> &scale,
    float epsilon, float global_scale, float shift,
    TensorShape<> &data_pos, TensorShape<> &base_pos, TensorShape<> &scale_pos,
    int dim) {
  int db = 0, ds = 0;
  int64_t extent = 0;
  if (dim < in.dim()) {
    // step of 1 along broadcast-capable axes only when extent > 1
    db = base.shape[dim] > 1 ? 1 : 0;
    ds = scale.shape[dim] > 1 ? 1 : 0;
    extent = in.shape[dim];
  }
  if (dim >= in.dim() - 1) {  // handles both last dimension and degenerate case
    Out *optr = out(data_pos);
    const In *iptr = in(data_pos);
    const float *sptr = scale(scale_pos);
    const float *bptr = base(base_pos);
    for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
      float mul;
      if (calc_inv_stddev) {
        float x = sptr[s] * sptr[s] + epsilon;
        mul = x ? rsqrt(x) * global_scale : 0;
      } else {
        mul = sptr[s] * global_scale;
      }
      optr[i] = ConvertSat<Out>(std::fma(iptr[i] - bptr[b], mul, shift));
    }
  } else {
    // recurse into the next dimension, tracking positions per tensor
    for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
      data_pos[dim] = i;
      base_pos[dim] = b;
      scale_pos[dim] = s;
      RefNormalize<calc_inv_stddev>(out, in, base, scale, epsilon,
                                    global_scale, shift, data_pos, base_pos,
                                    scale_pos, dim + 1);
    }
  }
}

/**
 * @brief Reference normalization of a single tensor
 *
 * If base/scale has an extent of 1 in any given dimension, it's broadcast
 * along this axis.
 *
 * @param calc_inv_stddev if true, `scale` is assumed to contain standard
 *                        deviation, which is subsequently regularized using
 *                        given epsilon value
 */
template <typename Out, typename In>
void RefNormalize(
    const OutTensorCPU<Out> &out, const InTensorCPU<In> &in,
    const InTensorCPU<float> &base, const InTensorCPU<float> &scale,
    float global_scale, float shift, bool calc_inv_stddev, float epsilon) {
  TensorShape<> data_pos, base_pos, scale_pos;
  int D = in.dim();
  data_pos.resize(D);
  base_pos.resize(D);
  scale_pos.resize(D);
  if (calc_inv_stddev) {
    RefNormalize<true>(out, in, base, scale, epsilon, global_scale, shift,
                       data_pos, base_pos, scale_pos, 0);
  } else {
    RefNormalize<false>(out, in, base, scale, epsilon, global_scale, shift,
                        data_pos, base_pos, scale_pos, 0);
  }
}

/**
 * @brief Reference implementation of normalization
 *
 * Goes over all input samples and normalizes them using given base and scale
 * tensor lists. If base/scale TL has 1 element, it is reused for
 * normalization of all samples. If base/scale has an extent of 1 in any given
 * dimension, it's broadcast along this axis.
 *
 * @param calc_inv_stddev if true, `scale` is assumed to contain standard
 *                        deviation, which is subsequently regularized using
 *                        given epsilon value
 */
template <typename Out, typename In>
void RefNormalize(
    const OutListCPU<Out> &out, const TensorListView<StorageCPU, In> &in,
    const InListCPU<float> &base, const InListCPU<float> &scale,
    float global_scale, float shift,
    bool calc_inv_stddev = false, float epsilon = 0) {
  assert(out.shape == in.shape);
  int N = in.num_samples();
  int db = base.num_samples() > 1;
  int ds = scale.num_samples() > 1;
  for (int i = 0, b = 0, s = 0; i < N; i++, b += db, s += ds) {
    RefNormalize<Out, In>(out[i], in[i], base[b], scale[s], global_scale,
                          shift, calc_inv_stddev, epsilon);
  }
}

// Generates num_samples random shapes of rank ndim whose volume does not
// exceed max_volume. When reduce_batch is set, non-reduced extents are copied
// from sample 0 so they are uniform across the batch.
template <typename RNG>
TensorListShape<> RandomDataShape(int num_samples, int ndim,
                                  int64_t max_volume, uint64_t reduced_axes,
                                  bool reduce_batch, RNG &rng) {
  assert(max_volume >= 1);
  TensorListShape<> sh;
  sh.resize(num_samples, ndim);

  int64_t extent_range = std::ceil(pow(max_volume, 1.0 / ndim));
  std::uniform_int_distribution<int64_t> shape_dist(1, extent_range);

  for (int i = 0; i < num_samples; i++) {
    auto sample_shape = sh.tensor_shape_span(i);
    do {
      for (int d = 0; d < ndim; d++) {
        // when reducing samples in the batch, the non-reduced extents must be
        // uniform across all samples
        sample_shape[d] = reduced_axes & (1_u64 << d) || !reduce_batch ||
                          i == 0
                              ? shape_dist(rng)
                              : sh.tensor_shape_span(0)[d];
      }
    } while (volume(sample_shape) > max_volume);
  }
  return sh;
}

/**
 * @brief Creates a tensor list which contains a repeated scalar
 *
 * If ndim > 0, then the tensor list will contain 1x1x...x1 tensors with given
 * dimensionality
 */
template <typename T>
TensorListView<StorageCPU, T> ScalarTLV(T &scalar, int num_samples,
                                        int ndim = 0) {
  TensorListView<StorageCPU, T> tlv;
  TensorShape<> ts;
  ts.resize(ndim);
  for (int d = 0; d < ndim; d++)
    ts[d] = 1;
  // every sample aliases the same scalar storage
  tlv.shape = uniform_list_shape(num_samples, ts);
  tlv.data.resize(num_samples);
  for (int i = 0 ; i < num_samples; i++)
    tlv.data[i] = &scalar;
  return tlv;
}

template <typename Params>
class NormalizeImplGPUTest;

// Typed fixture parameterized on std::pair<Out, In>.
template <typename Out, typename In>
class NormalizeImplGPUTest<std::pair<Out, In>> : public ::testing::Test {
 public:
  // this will test both the top-level pImpl class and the internal
  // implementation class
  using Kernel = std::conditional_t<std::is_same<Out, In>::value,
                                    NormalizeGPU<Out, In>,
                                    normalize_impl::NormalizeImplGPU<Out, In, float, float>
                                   >;

  // Convenience overload taking the reduced axes as an initializer list.
  void Init(int num_samples, int ndim, int64_t max_sample_volume,
            std::initializer_list<int> reduced_axes, bool reduce_batch,
            bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
    Init(num_samples, ndim, max_sample_volume,
         { reduced_axes.begin(), reduced_axes.end() },
         reduce_batch, scalar_base, scalar_scale, scale_is_stddev);
  }

  // Builds random input data and base/scale parameters (scalar or
  // per-sample tensors) for the requested reduction configuration.
  void Init(int num_samples, int ndim, int64_t max_sample_volume,
            span<const int> reduced_axes, bool reduce_batch,
            bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
    In lo = 0, hi = 100;
    use_scalar_base_ = scalar_base;
    use_scalar_scale_ = scalar_scale;
    axis_mask_ = to_bit_mask(reduced_axes);
    reduced_axes_ = { begin(reduced_axes), end(reduced_axes) };
    reduce_batch_ = reduce_batch;
    scale_is_stddev_ = scale_is_stddev;

    data_shape_ = RandomDataShape(num_samples, ndim, max_sample_volume,
                                  axis_mask_, reduce_batch_, rng_);
    in_.reshape(data_shape_);
    UniformRandomFill(in_.cpu(), rng_, lo, hi);

    if (!scalar_base || !scalar_scale) {
      // parameter tensors: reduced axes collapse to extent 1
      int param_samples = reduce_batch ? 1 : num_samples;
      param_shape_.resize(param_samples, ndim);
      for (int i = 0; i < param_samples; i++) {
        for (int d = 0; d < ndim; d++) {
          bool reduced = axis_mask_ & (1_u64 << d);
          param_shape_.tensor_shape_span(i)[d] =
              reduced ? 1 : data_shape_.tensor_shape_span(i)[d];
        }
      }
    } else {
      param_shape_.resize(1, 0);
    }

    auto scale_dist = uniform_distribution(0.1f, 10.0f);
    if (scalar_scale) {
      scalar_scale_ = scale_dist(rng_);
    } else {
      scale_.reshape(param_shape_);
      UniformRandomFill(scale_.cpu(), rng_, scale_dist.a(), scale_dist.b());
    }

    if (scalar_base) {
      scalar_base_ = uniform_distribution(lo, hi)(rng_);
    } else {
      base_.reshape(param_shape_);
      UniformRandomFill(base_.cpu(), rng_, lo, hi);
    }

    if (std::is_integral<Out>::value) {
      global_scale_ = std::exp2f(7 * sizeof(Out)) / hi;  // scale to half range
      if (std::is_unsigned<Out>::value)
        shift_ = global_scale_;  // shift half range up
    }
  }

  // Runs Setup + Launch three times and compares against the CPU reference.
  void RunTest() {
    kmgr_.Resize<Kernel>(1, 1);
    KernelContext ctx;
    for (int iter = 0; iter < 3; iter++) {
      auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
                                     use_scalar_base_, use_scalar_scale_,
                                     scale_is_stddev_);
      ASSERT_EQ(req.output_shapes.size(), 1u);
      ASSERT_EQ(req.output_shapes[0], data_shape_);
      out_.reshape(data_shape_);
      ref_.reshape(data_shape_);

      Launch(ctx);

      int param_samples = param_shape_.num_samples();

      auto ref_base = use_scalar_base_
                          ? ScalarTLV(scalar_base_, param_samples,
                                      data_shape_.sample_dim())
                          : base_.cpu();
      auto ref_scale = use_scalar_scale_ ?
ScalarTLV(scalar_scale_, param_samples, data_shape_.sample_dim()) : scale_.cpu();
      RefNormalize(ref_.cpu(), in_.cpu(), ref_base, ref_scale, global_scale_,
                   shift_, scale_is_stddev_, epsilon_);
      // inverse-stddev in float is less exact; relax the tolerance there
      if (scale_is_stddev_ && !std::is_integral<Out>::value)
        Check(out_.cpu(), ref_.cpu(), EqualEpsRel(1e-6, 1e-6));
      else
        Check(out_.cpu(), ref_.cpu(), EqualUlp(4));
    }
  }

  // Times a single Launch (after one warm-up) with CUDA events and reports
  // an effective memory throughput figure.
  void RunPerf() {
    kmgr_.Resize<Kernel>(1, 1);
    KernelContext ctx;
    auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
                                   use_scalar_base_, use_scalar_scale_,
                                   scale_is_stddev_);
    ASSERT_EQ(req.output_shapes.size(), 1u);
    ASSERT_EQ(req.output_shapes[0], data_shape_);
    out_.reshape(data_shape_);

    CUDAEvent start = CUDAEvent::CreateWithFlags(0);
    CUDAEvent end = CUDAEvent::CreateWithFlags(0);

    auto out_gpu = out_.gpu();
    CUDA_CALL(cudaMemsetAsync(out_gpu.data[0], 0,
                              sizeof(Out) * out_gpu.num_elements(),
                              ctx.gpu.stream));
    Launch(ctx);  // warm-up
    CUDA_CALL(cudaEventRecord(start, ctx.gpu.stream));
    Launch(ctx);
    CUDA_CALL(cudaEventRecord(end, ctx.gpu.stream));
    float time;
    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventElapsedTime(&time, start, end));

    time *= 1e+6f;  // convert to nanoseconds
    int64_t out_size = data_shape_.num_elements() * sizeof(Out);
    int64_t in_size = data_shape_.num_elements() * sizeof(In);
    // BUGFIX: gate on the use_* flags, not on the float values — the previous
    // code tested `scalar_base_ ?` / `scalar_scale_ ?`, i.e. the scalar
    // *values* (scalar_scale_ defaults to 1, always truthy), which misstated
    // the amount of parameter data actually transferred.
    int64_t base_size =
        use_scalar_base_ ? 0 : param_shape_.num_elements() * sizeof(float);
    int64_t scale_size =
        use_scalar_scale_ ? 0 : param_shape_.num_elements() * sizeof(float);
    int64_t data_size = out_size + in_size + base_size + scale_size;
    std::cerr << "Throughput: " << data_size / time << " GB/s\n";
  }

  // Dispatches the kernel run matching the scalar/tensor parameter mode.
  void Launch(KernelContext &ctx) {
    if (use_scalar_base_) {
      if (use_scalar_scale_) {
        kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_,
                          scalar_scale_, global_scale_, shift_, epsilon_);
      } else {
        kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_,
                          scale_.gpu(), global_scale_, shift_, epsilon_);
      }
    } else {
      if (use_scalar_scale_) {
        kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(),
                          scalar_scale_, global_scale_, shift_, epsilon_);
      } else {
        kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(),
                          scale_.gpu(), global_scale_, shift_, epsilon_);
      }
    }
  }

 protected:
  KernelManager kmgr_;
  TestTensorList<In> in_;
  TestTensorList<Out> out_;
  TestTensorList<float> ref_;
  TestTensorList<float> base_, scale_;
  TensorListShape<> data_shape_, param_shape_;
  SmallVector<int, 6> reduced_axes_;
  uint64_t axis_mask_;
  bool reduce_batch_ = false;
  bool use_scalar_base_ = false;
  bool use_scalar_scale_ = false;
  bool scale_is_stddev_ = false;
  float scalar_base_ = 0, scalar_scale_ = 1;
  float global_scale_ = 1.25f, shift_ = 0.1f, epsilon_ = 0.2f;
  std::mt19937_64 rng_;
};

using NormalizeTestTypes = ::testing::Types<
  std::pair<int16_t, uint8_t>,
  std::pair<float, uint16_t>,
  std::pair<float, float>>;

TYPED_TEST_SUITE(NormalizeImplGPUTest, NormalizeTestTypes);

TYPED_TEST(NormalizeImplGPUTest, NonScalar) {
  this->Init(10, 4, 10000, { 1, 3 }, false, false, false, false);
  this->RunTest();
  this->Init(10, 3, 10000, { 0, 2 }, true, false, false, false);
  this->RunTest();
}

TYPED_TEST(NormalizeImplGPUTest, ScalarBase) {
  this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false);
  this->RunTest();
  this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false);
  this->RunTest();
}

TYPED_TEST(NormalizeImplGPUTest, ScalarScale) {
  this->Init(10, 4, 10000, { 1, 3 }, false, false, true, false);
this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams) { this->Init(10, 4, 10000, {}, false, true, true, false); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, NonScalar_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, false, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, false, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarBase_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarScale_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, true, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams_InvStdDev) { this->Init(10, 4, 10000, {}, false, true, true, true); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams) { this->Init(64, 3, 1<<20, {}, false, true, true, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D_InvStdDev) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01_InvStdDev) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, true); 
this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12_InvStdDev) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams_InvStdDev) { this->Init(64, 3, 1<<20, {}, false, true, true, true); this->RunPerf(); } } // namespace kernels } // namespace dali
the_stack
#include "matrix_common.hpp"
#include "matrix_fine.hpp"

namespace arb {
namespace gpu {

namespace kernels {

//
// gather and scatter kernels
//

// to[i] = from[p[i]]
// One thread per element; out-of-range threads (i >= n) do nothing.
template <typename T, typename I>
__global__
void gather(const T* __restrict__ const from,
            T* __restrict__ const to,
            const I* __restrict__ const p,
            unsigned n)
{
    unsigned i = threadIdx.x + blockDim.x*blockIdx.x;

    if (i<n) {
        to[i] = from[p[i]];
    }
}

// to[p[i]] = from[i]
// One thread per element; out-of-range threads (i >= n) do nothing.
// NOTE(review): assumes p is a permutation (no duplicate targets), otherwise
// concurrent writes to the same destination would race — confirm with callers.
template <typename T, typename I>
__global__
void scatter(const T* __restrict__ const from,
             T* __restrict__ const to,
             const I* __restrict__ const p,
             unsigned n)
{
    unsigned i = threadIdx.x + blockDim.x*blockIdx.x;

    if (i<n) {
        to[p[i]] = from[i];
    }
}

/// GPU implementation of Hines matrix assembly.
/// Fine layout.
/// For a given time step size dt:
///     - use the precomputed alpha and alpha_d values to construct the diagonal
///       and off diagonal of the symmetric Hines matrix.
///     - compute the RHS of the linear system to solve.
///
/// One thread per CV (compartment); `perm` maps the CV index to its permuted
/// position in the packed matrix storage (d, rhs).
template <typename T, typename I>
__global__
void assemble_matrix_fine(
        T* __restrict__ const d,
        T* __restrict__ const rhs,
        const T* __restrict__ const invariant_d,
        const T* __restrict__ const voltage,
        const T* __restrict__ const current,
        const T* __restrict__ const conductivity,
        const T* __restrict__ const cv_capacitance,
        const T* __restrict__ const area,
        const I* __restrict__ const cv_to_intdom,
        const T* __restrict__ const dt_intdom,
        const I* __restrict__ const perm,
        unsigned n)
{
    const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x;

    if (tid < n) {
        // The 1e-3 is a constant of proportionality required to ensure that the
        // conductance (gi) values have units μS (micro-Siemens).
        // See the model documentation in docs/model for more information.
        const auto dt = dt_intdom[cv_to_intdom[tid]];
        // p==false marks an integration domain whose time stepping is finished
        // (dt <= 0); its diagonal is zeroed below so the solver skips it.
        const auto p = dt > 0;
        const auto pid = perm[tid];
        const auto area_factor = T(1e-3)*area[tid];

        // gi may be computed with dt==0 (gives inf) but is discarded via the
        // p? selects below, so no invalid value ever reaches d/rhs.
        const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid];
        const auto r_d = gi + invariant_d[tid];
        const auto r_rhs = gi*voltage[tid] - area_factor*current[tid];

        d[pid] = p ? r_d : 0;
        rhs[pid] = p ? r_rhs : voltage[tid];
    }
}

/// GPU implementation of Hines Matrix solver.
/// Fine-grained tree based solver.
/// Each block solves a set of matrices iterating over the levels of matrix
/// and performing a backward and forward substitution. On each level one thread
/// gets assigned to one branch on this level of a matrix and solves and
/// performs the substitution. Afterwards all threads continue on the next
/// level.
/// To avoid idle threads, one should try that on each level, there is a similar
/// number of branches.
///
/// Grid layout: one CUDA block per matrix block (see the "Example" diagram
/// above solve_matrix_fine() below); threadIdx.x selects a branch within the
/// current level, so blockDim.x must be >= the widest level of the block.
template <typename T>
__global__
void solve_matrix_fine(
    T* __restrict__ const rhs,
    T* __restrict__ const d,
    const T* __restrict__ const u,
    const level_metadata* __restrict__ const level_meta,
    const fvm_index_type* __restrict__ const level_lengths,
    const fvm_index_type* __restrict__ const level_parents,
    const fvm_index_type* __restrict__ const block_index,
    const fvm_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells
{
    const auto tid = threadIdx.x;
    const auto bid = blockIdx.x;

    // Range of levels handled by this CUDA block.
    const auto first_level = block_index[bid];
    const auto num_levels = block_index[bid + 1] - first_level;

    const auto block_level_meta = &level_meta[first_level];

    // backward substitution
    for (unsigned l=0; l<num_levels-1; ++l) {
        // Metadata for this level and the next level
        const auto& lvl_meta = block_level_meta[l];
        const auto& next_lvl_meta = block_level_meta[l+1];

        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;

        const unsigned width = lvl_meta.num_branches;

        // Perform backward substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = lvl_meta.matrix_data_index + tid;

            // Zero diagonal term implies dt==0; just leave rhs (for whole matrix)
            // alone in that case.

            // Each cell has a different `dt`, because we choose time step size
            // according to when the next event is arriving at a cell. So, some
            // cells require more time steps than others, but we have to solve
            // all the matrices at the same time. When a cell finishes, we put a
            // `0` on the diagonal to mark that it should not be solved for.
            if (d[pos]!=0) {
                // each branch performs substitution; nodes within a branch are
                // stored `width` apart, so stepping by `width` walks the branch.
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const auto d_next = d[next_pos];
                    const auto rhs_next = rhs[next_pos];
                    // Symmetric elimination step:
                    //   d[next]   -= u[pos]^2 / d[pos]
                    //   rhs[next] -= u[pos]*rhs[pos] / d[pos]
                    const T factor = -u[pos]/d[pos];
                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);
                    pos = next_pos;
                }

                // Update d and rhs at the parent node of this branch.
                // A parent may have more than one branch contributing to it,
                // so we use atomic updates to avoid race conditions.
                const unsigned parent_index = next_lvl_meta.matrix_data_index;
                const unsigned p = parent_index + lvl_parents[tid];

                const T factor = -u[pos] / d[pos];
                gpu_atomic_add(d + p, factor*u[pos]);
                gpu_atomic_add(rhs + p, factor*rhs[pos]);
            }
        }
        // Barrier: the next level reads the parent entries this level just
        // updated, so all branches must have finished before continuing.
        __syncthreads();
    }

    // Solve the root
    {
        // The levels are sorted such that the root is the last level
        const auto& last_lvl_meta = block_level_meta[num_levels-1];
        const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index;

        // On the root level there is one branch per matrix (cell).
        const unsigned width = num_matrix[bid];

        if (tid < width) {
            const unsigned len = lvl_lengths[tid];
            unsigned pos = last_lvl_meta.matrix_data_index + tid;

            if (d[pos]!=0) {
                // backward
                for (unsigned i=0; i<len-1; ++i) {
                    const unsigned next_pos = pos + width;
                    const T factor = -u[pos] / d[pos];

                    const auto rhs_next = rhs[next_pos];
                    const auto d_next = d[next_pos];

                    d[next_pos] = fma(factor, u[pos], d_next);
                    rhs[next_pos] = fma(factor, rhs[pos], rhs_next);

                    pos = next_pos;
                }

                // Solve the last (deepest) node of the root branch, then walk
                // back up performing the forward substitution for this branch.
                auto rhsp = rhs[pos] / d[pos];
                rhs[pos] = rhsp;
                pos -= width;

                // forward
                for (unsigned i=0; i<len-1; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }

    // forward substitution

    // take great care with loop limits decrementing unsigned counter l
    for (unsigned l=num_levels-1; l>0; --l) {
        const auto& lvl_meta = block_level_meta[l-1];

        // Addresses of the first elements of level_lengths and level_parents
        // that belong to this level
        const auto lvl_lengths = level_lengths + lvl_meta.level_data_index;
        const auto lvl_parents = level_parents + lvl_meta.level_data_index;

        const unsigned width = lvl_meta.num_branches;
        const unsigned parent_index = block_level_meta[l].matrix_data_index;

        // Barrier: parent-level rhs values (read below) were written by the
        // previous iteration / root solve.
        __syncthreads();

        // Perform forward-substitution for each branch on this level.
        // One thread per branch.
        if (tid < width) {
            // Find the index of the first node in this branch.
            const unsigned len = lvl_lengths[tid];
            // Start at the deepest node of the branch and walk towards its
            // first node (hence pos is decremented by `width` each step).
            unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid;

            if (d[pos]!=0) {
                // Load the rhs value for the parent node of this branch.
                const unsigned p = parent_index + lvl_parents[tid];
                T rhsp = rhs[p];

                // each branch performs substitution
                for (unsigned i=0; i<len; ++i) {
                    rhsp = rhs[pos] - u[pos]*rhsp;
                    rhsp /= d[pos];
                    rhs[pos] = rhsp;
                    pos -= width;
                }
            }
        }
    }
}

} // namespace kernels

// Host wrapper: launch kernels::gather with 128-thread blocks.
void gather(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned blockdim = 128;
    const unsigned griddim = impl::block_count(n, blockdim);

    kernels::gather<<<griddim, blockdim>>>(from, to, p, n);
}

// Host wrapper: launch kernels::scatter with 128-thread blocks.
void scatter(
    const fvm_value_type* from,
    fvm_value_type* to,
    const fvm_index_type* p,
    unsigned n)
{
    constexpr unsigned blockdim = 128;
    const unsigned griddim = impl::block_count(n, blockdim);

    kernels::scatter<<<griddim, blockdim>>>(from, to, p, n);
}

// Host wrapper: launch the fine-layout matrix assembly kernel, one thread
// per CV, 128-thread blocks.
void assemble_matrix_fine(
    fvm_value_type* d,
    fvm_value_type* rhs,
    const fvm_value_type* invariant_d,
    const fvm_value_type* voltage,
    const fvm_value_type* current,
    const fvm_value_type* conductivity,
    const fvm_value_type* cv_capacitance,
    const fvm_value_type* area,
    const fvm_index_type* cv_to_intdom,
    const fvm_value_type* dt_intdom,
    const fvm_index_type* perm,
    unsigned n)
{
    const unsigned block_dim = 128;
    const unsigned num_blocks = impl::block_count(n, block_dim);

    kernels::assemble_matrix_fine<<<num_blocks, block_dim>>>(
        d, rhs, invariant_d, voltage, current, conductivity,
        cv_capacitance, area, cv_to_intdom, dt_intdom, perm, n);
}

// Example:
//
//         block 0                  block 1               block 2
// .~~~~~~~~~~~~~~~~~~.  .~~~~~~~~~~~~~~~~~~~~~~~~.  .~~~~~~~~~~~ ~ ~
//
//  L0 \  /                                           L5    \  /
//      \/                                                   \/
//  L1   \   /   \   /    L3 \   /   \ | /   \   /    L6 \   /  . . .
//        \ /     \ /         \ /     \|/     \ /         \ /
//  L2     |       |      L4   |       |       |      L7   |
//         |       |           |       |       |           |
//
// levels       = [L0, L1, L2, L3, L4, L5, L6, L7, ... ]
// block_index  = [0, 3, 5, 8, ...]
// num_levels   = [3, 2, 3, ...]
// num_cells    = [2, 3, ...]
// num_blocks   = level_start.size() - 1 = num_levels.size() = num_cells.size()

// Host wrapper: launch the fine-grained tree solver, one CUDA block per
// matrix block (see example above).
void solve_matrix_fine(
    fvm_value_type* rhs,
    fvm_value_type* d,                     // diagonal values
    const fvm_value_type* u,               // upper diagonal (and lower diagonal as the matrix is SPD)
    const level_metadata* level_meta,      // information pertaining to each level
    const fvm_index_type* level_lengths,   // lengths of branches of every level concatenated
    const fvm_index_type* level_parents,   // parents of branches of every level concatenated
    const fvm_index_type* block_index,     // start index into levels for each gpu block
    fvm_index_type* num_cells,             // the number of cells packed into this single matrix
    fvm_index_type* padded_size,           // length of rhs, d, u, including padding (unused here)
    unsigned num_blocks,                   // number of blocks
    unsigned blocksize)                    // size of each block
{
    kernels::solve_matrix_fine<<<num_blocks, blocksize>>>(
        rhs, d, u, level_meta, level_lengths, level_parents, block_index,
        num_cells);
}

} // namespace gpu
} // namespace arb
the_stack
struct is_even { __host__ __device__ bool operator()(const int &x) { return (x % 2) == 0; } }; using namespace mgpu; using namespace std; using namespace thrust::placeholders; size_t int_size = sizeof(int_type); size_t float_size = sizeof(float_type); queue<string> namevars; queue<string> typevars; queue<int> sizevars; queue<int> cols; queue<unsigned int> j_col_count; unsigned int sel_count = 0; unsigned int join_cnt = 0; unsigned int distinct_cnt = 0; unsigned int join_col_cnt = 0; unsigned int join_tab_cnt = 0; unsigned int tab_cnt = 0; queue<string> op_join; queue<char> join_type; queue<char> join_eq_type; unsigned int partition_count; map<string,unsigned int> mystat; map<unsigned int, unsigned int> join_and_cnt; map<string, map<string, bool> > used_vars; bool save_dict = 0; thrust::device_vector<unsigned char> scratch; map<string, string> filter_var; thrust::device_vector<int> ranj; unsigned long long int currtime; standard_context_t context; map<string, char> varencoding; void check_used_vars() { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { auto s = (*it).second; auto vars(op_value); while(!vars.empty()) { if(s.count(vars.front()) != 0) { used_vars[(*it).first][vars.front()] = 1; }; vars.pop(); } }; } void emit_name(const char *name) { op_type.push("NAME"); op_value.push(name); } void emit_limit(const int val) { op_nums.push(val); } void emit_string(const char *str) { // remove the float_type quotes if(str[0] == '"') { string sss(str,1, strlen(str)-2); op_value.push(sss); } else { string sss(str); op_value.push(sss); }; op_type.push("STRING"); } void emit_string_grp(const char *str, const char *str_grp) { emit_string(str); grp_val = str_grp; }; void emit_fieldname(const char* name1, const char* name2) { string s1(name1); string s2(name2); op_type.push("FIELD"); op_value.push(s1 + "." 
+ s2); }; void emit_number(const int_type val) { op_type.push("NUMBER"); op_nums.push(val); op_nums_precision.push(0); } void emit_float(const float_type val) { op_type.push("FLOAT"); op_nums_f.push(val); } void emit_decimal(const char* str) { op_type.push("NUMBER"); string s1(str); unsigned int precision; auto pos = s1.find("."); if(pos == std::string::npos) precision = 0; else { precision = (s1.length() - pos) -1; s1.erase(pos,1); }; op_nums.push(stoi(s1)); op_nums_precision.push(precision); } void emit_mul() { op_type.push("MUL"); } void emit_add() { op_type.push("ADD"); } void emit_div() { op_type.push("DIV"); } unsigned int misses = 0; void emit_and() { op_type.push("AND"); join_col_cnt++; } void emit_eq() { op_type.push("JOIN"); join_eq_type.push('E'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_neq() { op_type.push("JOIN"); join_eq_type.push('N'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_distinct() { op_type.push("DISTINCT"); distinct_cnt++; } void emit_year() { op_type.push("YEAR"); } void emit_month() { op_type.push("MONTH"); } void emit_day() { op_type.push("DAY"); } void emit_cast() { op_type.push("CAST"); } void emit_or() { op_type.push("OR"); } void emit_minus() { op_type.push("MINUS"); } void emit_cmp(int val) { op_type.push("CMP"); op_nums.push(val); } void emit(const char *s, ...) 
{ } void emit_var(const char *s, const int c, const char *f, const char* ref, const char* ref_name) { namevars.push(s); typevars.push(f); sizevars.push(0); cols.push(c); } void emit_var_asc(const char *s) { op_type.push(s); op_value.push("ASC"); } void emit_var_desc(const char *s) { op_type.push(s); op_value.push("DESC"); } void emit_sort(const char *s, const int p) { op_sort.push(s); partition_count = p; } void emit_presort(const char *s) { op_presort.push(s); } void emit_varchar(const char *s, const int c, const char *f, const int d, const char *ref, const char* ref_name, const char* encoding) { namevars.push(s); typevars.push(f); sizevars.push(d); cols.push(c); varencoding[s] = encoding[0]; } void emit_vardecimal(const char *s, const int c, const char *f, const int scale, const int precision) { namevars.push(s); typevars.push(f); sizevars.push(precision); cols.push(c); } void emit_sel_name(const char *s) { op_type.push("emit sel_name"); op_value.push(s); sel_count++; } void emit_count() { op_type.push("COUNT"); } void emit_sum() { op_type.push("SUM"); } void emit_average() { op_type.push("AVG"); } void emit_min() { op_type.push("MIN"); } void emit_max() { op_type.push("MAX"); } void emit_join_tab(const char *s, const char tp) { op_join.push(s); join_tab_cnt++; join_type.push(tp); }; void order_inplace_host(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { unsigned int* permutation = new unsigned int[a->mRecCount]; thrust::sequence(permutation, permutation + a->mRecCount); char* temp = new char[a->mRecCount*max_char(a)]; stack<string> exe_type1(exe_type), exe_value; while(!exe_type1.empty()) { exe_value.push("ASC"); exe_type1.pop(); }; // sort on host for(;!exe_type.empty(); exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] != 1) update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp); else 
update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if (a->type[*it] != 1) { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_int[*it].data(), (int_type*)temp); thrust::copy((int_type*)temp, (int_type*)temp + a->mRecCount, a->h_columns_int[*it].data()); } else { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_float[*it].data(), (float_type*)temp); thrust::copy((float_type*)temp, (float_type*)temp + a->mRecCount, a->h_columns_float[*it].data()); } }; delete [] temp; delete [] permutation; } void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { if(scratch.size() < a->mRecCount*4) scratch.resize(a->mRecCount*4); thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::sequence(permutation, permutation+a->mRecCount,0,1); unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation); if(a->grp.size() < a->mRecCount*8) a->grp.resize(a->mRecCount*8); unsigned int bits; for(; !exe_type.empty(); exe_type.pop()) { if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[exe_type.top()]; if (a->type[exe_type.top()] != 1) { update_permutation(a->d_columns_int[exe_type.top()], raw_ptr, a->mRecCount, "ASC", (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else update_permutation(a->d_columns_float[exe_type.top()], raw_ptr, a->mRecCount,"ASC", (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[*it]; if (a->type[*it] != 1) { apply_permutation(a->d_columns_int[*it], raw_ptr, a->mRecCount, (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else { apply_permutation(a->d_columns_float[*it], raw_ptr, a->mRecCount, 
(float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; }; } bool check_star_join(const string j1) { auto op_vals(op_value); for(auto i=0; i < sel_count; i++) { op_vals.pop(); op_vals.pop(); }; if(join_tab_cnt > 0) { while(op_vals.size()) { if (std::find(varNames[j1]->columnNames.begin(), varNames[j1]->columnNames.end(), op_vals.front()) != varNames[j1]->columnNames.end()) { op_vals.pop(); op_vals.pop(); } else { return 0; }; }; if(join_tab_cnt == 1) { if(!check_bitmap_file_exist(varNames[j1], varNames[op_join.front()])) { return 0; }; }; return 1; } else return 0; } void star_join(const char *s, const string j1) { map<string,bool> already_copied; queue<string> op_left; CudaSet* left = varNames.find(j1)->second; queue<string> op_sel; queue<string> op_sel_as; for(auto i=0; i < sel_count; i++) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_value.front()) != left->columnNames.end()) op_left.push(op_value.front()); op_sel.push(op_value.front()); op_value.pop(); op_sel_as.push(op_value.front()); op_value.pop(); }; auto op_sel_s(op_sel), op_sel_s_as(op_sel_as), op_g(op_value); CudaSet* c = new CudaSet(op_sel_s, op_sel_s_as); string f1, f2; map<string, string> key_map; map<string, char> sort_map; map<string, string> r_map; for(auto i = 0; i < join_tab_cnt; i++) { f1 = op_g.front(); op_g.pop(); f2 = op_g.front(); op_g.pop(); r_map[f1] = f2; queue<string> op_jj(op_join); for(auto z = 0; z < (join_tab_cnt-1) - i; z++) op_jj.pop(); size_t rcount; queue<string> op_vd(op_g), op_alt(op_sel); unsigned int jc = join_col_cnt; while(jc) { jc--; op_vd.pop(); op_alt.push(op_vd.front()); op_vd.pop(); }; key_map[op_jj.front()] = f1; CudaSet* right = varNames.find(op_jj.front())->second; if(!check_bitmaps_exist(left, right)) { cout << "Required bitmap on table " << op_jj.front() << " doesn't exists" << endl; exit(0); }; queue<string> second; while(!op_alt.empty()) { if(f2.compare(op_alt.front()) != 0 && std::find(right->columnNames.begin(), 
right->columnNames.end(), op_alt.front()) != right->columnNames.end()) { second.push(op_alt.front()); //cout << "col " << op_alt.front() << " " << op_jj.front() << endl; op_left.push(f1); }; op_alt.pop(); }; if(!second.empty()) { right->filtered = 0; right->mRecCount = right->maxRecs; load_queue(second, right, "", rcount, 0, right->segCount, 0,0); // put all used columns into GPU }; }; queue<string> idx; set<string> already_loaded; bool right_cpy = 0; for (unsigned int i = 0; i < left->segCount; i++) { std::clock_t start2 = std::clock(); if(verbose) cout << "segment " << i << " " << getFreeMem() << endl; idx = left->fil_value; already_loaded.clear(); while(!idx.empty()) { //load the index if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) { //extract table name and colname from index name already_loaded.insert(idx.front()); size_t pos1 = idx.front().find_first_of(".", 0); size_t pos2 = idx.front().find_first_of(".", pos1+1); CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second; char a; //cout << "loading index " << idx.front() << endl; a = left->loadIndex(idx.front(), i); sort_map[idx.front().substr(pos1+1, pos2-pos1-1)] = a; }; idx.pop(); }; left->filtered = 0; size_t cnt_c = 0; allocColumns(left, left->fil_value); copyColumns(left, left->fil_value, i, cnt_c); bool* res = filter(left->fil_type, left->fil_value, left->fil_nums, left->fil_nums_f, left->fil_nums_precision, left, i); thrust::device_ptr<bool> star((bool*)res); size_t cnt = thrust::count(star, star + (unsigned int)left->mRecCount, 1); //cout << "join res " << cnt << " out of " << left->mRecCount << endl; thrust::host_vector<unsigned int> prm_vh(cnt); thrust::device_vector<unsigned int> prm_v(cnt); thrust::host_vector<unsigned int> prm_tmp(cnt); thrust::device_vector<unsigned int> prm_tmp_d(cnt); //std::cout<< "seg filter " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; if(cnt) { 
//gather //start1 = std::clock(); left->prm_d.resize(cnt); thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)left->mRecCount-1), star, left->prm_d.begin(), thrust::identity<bool>()); thrust::device_free(star); prm_vh = left->prm_d; size_t offset = c->mRecCount; c->resize_join(cnt); queue<string> op_sel1(op_sel_s); void* temp; CUDA_SAFE_CALL(cudaMalloc((void **) &temp, cnt*max_char(c))); cudaMemset(temp,0,cnt*max_char(c)); CudaSet *t; unsigned int cnt1, bits; int_type lower_val; thrust::device_vector<unsigned int> output(cnt); //std::cout<< "seg start " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; while(!op_sel1.empty()) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end()) { if(left->filtered) t = varNames[left->source_name]; else t = left; if(left->type[op_sel1.front()] <= 1) { if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFile(i, op_sel1.front(), offset, prm_vh, c); //std::cout<< "SSD L SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, op_sel1.front()); void* h; if(!interactive) { if(left->type[op_sel1.front()] == 0) h = t->h_columns_int[op_sel1.front()].data(); else h = t->h_columns_float[op_sel1.front()].data(); } else { string ff = t->load_file_name + "." + op_sel1.front()+ "." 
+ to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0];//bytes lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << " " << left->type[op_sel1.front()] << endl; if(bits == 8) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 16) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 32) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), ptr + offset); } } else if(bits == 64) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), ptr + offset); }; }; }; if(left->type[op_sel1.front()] != 1) thrust::transform( c->h_columns_int[op_sel1.front()].begin() + offset, c->h_columns_int[op_sel1.front()].begin() + offset + cnt, thrust::make_constant_iterator(lower_val), 
c->h_columns_int[op_sel1.front()].begin() + offset, thrust::plus<int_type>()); else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::transform(ptr + offset, ptr + offset + cnt, thrust::make_constant_iterator(lower_val), ptr + offset, thrust::plus<int_type>()); thrust::transform(ptr + offset, ptr + offset + cnt, c->h_columns_float[op_sel1.front()].begin() + offset, long_to_float()); }; } else { //gather string. There are no strings in fact tables. }; } else { for(auto it = key_map.begin(); it != key_map.end(); it++) { CudaSet* r = varNames.find(it->first)->second; if(std::find(r->columnNames.begin(), r->columnNames.end(), op_sel1.front()) != r->columnNames.end()) { if(i == 0) { if(data_dict[varNames[it->first]->load_file_name][op_sel1.front()].col_type == 2) { //cout << "SET " << op_sel1.front() << " to " << varNames[it->first]->load_file_name + "." + op_sel1.front() << endl; c->string_map[op_sel1.front()] = varNames[it->first]->load_file_name + "." + op_sel1.front(); }; } if(left->filtered) t = varNames[left->source_name]; else t = left; if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFileR(i, key_map[it->first], prm_vh, prm_tmp); //std::cout<< "SSD R SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, key_map[it->first]); void* h; if(!interactive) { h = t->h_columns_int[key_map[it->first]].data(); } else { string ff = t->load_file_name + "." + key_map[it->first] + "." 
+ to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0]; lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << endl; if(bits == 8) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 16) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 32) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 64) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), prm_tmp.begin()); }; }; if(lower_val != 1) thrust::transform(prm_tmp.begin(), prm_tmp.end(), thrust::make_constant_iterator(lower_val-1), prm_tmp.begin(), thrust::plus<unsigned int>()); if(sort_map[r->source_name] == '1') { // sorted consecutive starting with 1 dimension keys prm_tmp_d = prm_tmp; //cout << "PATH 1 " << endl; } else { //cout << "PATH 2 " << r->source_name << endl; output = prm_tmp; if(r->d_columns_int[r_map[key_map[it->first]]].size() == 0) { r->d_columns_int[r_map[key_map[it->first]]].resize(r->maxRecs); }; if(right_cpy == 0) { r->CopyColumnToGpu(r_map[key_map[it->first]]); }; thrust::lower_bound(r->d_columns_int[r_map[key_map[it->first]]].begin(), r->d_columns_int[r_map[key_map[it->first]]].end(), output.begin(), output.end(), prm_tmp_d.begin()); }; if(r->type[op_sel1.front()] != 1) { thrust::device_ptr<int_type> d_tmp((int_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_float[op_sel1.front()].begin() + 
offset); }; break; }; }; };
op_sel1.pop();
//std::cout<< ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
};
// release the per-segment device staging buffer used by the gathers above
cudaFree(temp);
right_cpy = 1;
};
//std::cout<< "SEG " << i << " " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
// unload the segment indexes: free the pinned-host index buffers loaded for this segment
idx = left->fil_value;
already_loaded.clear();
while(!idx.empty()) {
    if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) {
        // extract table name and colname from index name (format: <something>.<table>.<col>)
        already_loaded.insert(idx.front());
        size_t pos1 = idx.front().find_first_of(".", 0);
        size_t pos2 = idx.front().find_first_of(".", pos1+1);
        CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second;
        string f1 = idx.front() + "." + to_string(i);
        auto it = index_buffers.find(f1);
        if(it != index_buffers.end()) {
            cudaFreeHost(index_buffers[f1]);
            index_buffers.erase(it);
        };
    };
    idx.pop();
};
};
//if(verbose)
//    std::cout<< "star join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
// drop device copies of all joined dimension tables, then publish the result set
while(!op_join.empty()) {
    varNames[op_join.front()]->deAllocOnDevice();
    op_join.pop();
};
varNames[s] = c;
c->maxRecs = c->mRecCount;
if(verbose)
    cout << endl << "join count " << c->mRecCount << endl;
};

// Top-level JOIN dispatcher.
// s  - name of the result set; j1 - left-hand table; op_join holds the other table name(s).
// On the variable-scan pass (scan_state == 0) it only records liveness in mystat and returns.
// Otherwise it runs either a star join (check_star_join) or a chain of pairwise
// emit_multijoin calls (one per table when join_tab_cnt > 1, restoring op_value from the
// saved copy op_m between calls), then renames the result columns to their SELECT aliases
// and frees operands whose last use (per mystat) was this statement.
// NOTE(review): parameter 'grp' is not used in this body.
void emit_join(const char *s, const char *j1, const int grp, const int start_seg, const int end_seg)
{
    //cout << "emit_join " << s << " " << join_tab_cnt << " " << op_join.front() << endl;
    statement_count++;
    if (scan_state == 0) {
        // first pass: validate that the operands exist and record last-use statement numbers
        if (mystat.find(j1) == mystat.end() && data_dict.count(j1) == 0) {
            process_error(2, "Join : couldn't find variable " + string(j1) );
        };
        if (mystat.find(op_join.front()) == mystat.end() && data_dict.count(op_join.front()) == 0) {
            process_error(2, "Join : couldn't find variable " + op_join.front() );
        };
        mystat[s] = statement_count;
        mystat[j1] = statement_count;
        if(filter_var.find(j1) != filter_var.end()) {
            mystat[filter_var[j1]] = statement_count;
        };
        check_used_vars();
        while(!op_join.empty()) {
            mystat[op_join.front()] = statement_count;
            if(filter_var.find(op_join.front()) != filter_var.end()) {
                mystat[filter_var[op_join.front()]] = statement_count;
            };
            op_join.pop();
        };
        return;
    };
    // keep a copy of op_value: emit_multijoin consumes it, and multi-table chains need it intact per step
    queue<string> op_m(op_value);
    if(check_star_join(j1)) {
        if(verbose)
            cout << "executing star join !! " << endl;
        star_join(s, j1);
    }
    else {
        if(join_tab_cnt > 1) {
            // chain of pairwise joins; intermediate results are named s1, s2, ... and the last one s
            string tab_name;
            for(unsigned int i = 1; i <= join_tab_cnt; i++) {
                if(i == join_tab_cnt)
                    tab_name = s;
                else
                    tab_name = s + to_string(i);
                string j, j2;
                if(i == 1) {
                    j2 = op_join.front();
                    op_join.pop();
                    j = op_join.front();
                    op_join.pop();
                }
                else {
                    if(!op_join.empty()) {
                        j = op_join.front();
                        op_join.pop();
                    }
                    else
                        j = j1;
                    // right side of step i is the result of step i-1
                    j2 = s + to_string(i-1);
                };
                emit_multijoin(tab_name, j, j2, i, s, start_seg, end_seg);
                op_value = op_m;
            };
        }
        else {
            emit_multijoin(s, j1, op_join.front(), 1, s, start_seg, end_seg);
            op_join.pop();
        };
    };
    // rename result columns from their source names to the SELECT aliases
    queue<string> op_sel;
    queue<string> op_sel_as;
    for(int i=0; i < sel_count; i++) {
        op_sel.push(op_m.front());
        op_m.pop();
        op_sel_as.push(op_m.front());
        op_m.pop();
    };
    while(!op_sel_as.empty()) {
        //cout << "alias " << op_sel.front() << " : " << op_sel_as.front() << endl;
        if(op_sel.front() != op_sel_as.front()) {
            // move the column's storage maps to the alias key; type 0 = int, 1 = float(decimal), else char
            if(varNames[s]->type[op_sel.front()] == 0) {
                varNames[s]->h_columns_int[op_sel_as.front()] = varNames[s]->h_columns_int[op_sel.front()];
                varNames[s]->h_columns_int.erase(op_sel.front());
                varNames[s]->d_columns_int[op_sel_as.front()] = varNames[s]->d_columns_int[op_sel.front()];
                varNames[s]->d_columns_int.erase(op_sel.front());
                varNames[s]->type[op_sel_as.front()] = 0;
                varNames[s]->type.erase(op_sel.front());
            }
            else if(varNames[s]->type[op_sel.front()] == 1) {
                varNames[s]->h_columns_float[op_sel_as.front()] = varNames[s]->h_columns_float[op_sel.front()];
                varNames[s]->h_columns_float.erase(op_sel.front());
                varNames[s]->d_columns_float[op_sel_as.front()] = varNames[s]->d_columns_float[op_sel.front()];
                varNames[s]->d_columns_float.erase(op_sel.front());
                varNames[s]->type[op_sel_as.front()] = 1;
                varNames[s]->type.erase(op_sel.front());
                varNames[s]->decimal.erase(op_sel.front());
            }
            else {
                varNames[s]->h_columns_char[op_sel_as.front()] = varNames[s]->h_columns_char[op_sel.front()];
                varNames[s]->h_columns_char.erase(op_sel.front());
                varNames[s]->d_columns_char[op_sel_as.front()] = varNames[s]->d_columns_char[op_sel.front()];
                varNames[s]->d_columns_char.erase(op_sel.front());
                varNames[s]->type[op_sel_as.front()] = 2;
                varNames[s]->type.erase(op_sel.front());
                varNames[s]->char_size[op_sel_as.front()] = varNames[s]->char_size[op_sel.front()];
                varNames[s]->char_size.erase(op_sel.front());
            };
            varNames[s]->decimal[op_sel_as.front()] = varNames[s]->decimal[op_sel.front()];
            auto it = std::find(varNames[s]->columnNames.begin(), varNames[s]->columnNames.end(), op_sel.front());
            *it = op_sel_as.front();
        };
        op_sel_as.pop();
        op_sel.pop();
    };
    clean_queues();
    // free operands whose last recorded use is this statement
    if(mystat[s] == statement_count) {
        varNames[s]->free();
        varNames.erase(s);
    };
    if(op_join.size()) {
        if(mystat[op_join.front()] == statement_count && op_join.front().compare(j1) != 0) {
            varNames[op_join.front()]->free();
            varNames.erase(op_join.front());
        };
    };
}

// Host-side gather: dest[i] = h[h_tmp[i]] for every index in h_tmp.
template<typename T, typename P>
void p_gather(thrust::host_vector<int>& h_tmp, T* h, P* dest)
{
    for(int i = 0; i < h_tmp.size(); i++) {
        dest[i] = h[h_tmp[i]];
    };
};

// Pairwise GPU join of j1 (left, processed segment by segment) with j2 (right),
// producing result set s (published under res_name via varNames[s]).
// tab is the 1-based position of this join in a multi-join chain; start_segment/end_segment
// bound the left-table segments to process (end_segment == -1 means all).
// Join kind comes from the global join_type queue ('I' inner; '1'/'2' semi, '3'/'4' anti —
// see the branch comments further down).
void emit_multijoin(const string s, const string j1, const string j2, const unsigned int tab, const char* res_name, const int start_segment, const int end_segment)
{
    if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
        clean_queues();
        if(varNames.find(j1) == varNames.end())
            cout << "Couldn't find j1 " << j1 << endl;
        if(varNames.find(j2) == varNames.end())
            cout << "Couldn't find j2 " << j2 << " here " << endl;
        return;
    };
    CudaSet* left = varNames.find(j1)->second;
    CudaSet* right = varNames.find(j2)->second;
    // pull the SELECT list (name/alias pairs) off op_value
    queue<string> op_sel;
    queue<string> op_sel_as;
    for(int i=0; i < sel_count; i++) {
        op_sel.push(op_value.front());
        op_value.pop();
op_sel_as.push(op_value.front());
        op_value.pop();
    };
    queue<string> op_sel_s(op_sel);
    queue<string> op_sel_s_as(op_sel_as);
    queue<string> op_g(op_value);
    // in a multi-join chain, later steps still need the key columns of the remaining joins,
    // so push them onto the working select list as well
    if(tab > 0) {
        for(unsigned int z = 0; z < join_tab_cnt - tab; z++) {
            for(unsigned int j = 0; j < join_and_cnt[z]*2 + 2; j++) {
                op_sel_s.push(op_g.front());
                op_sel_s_as.push(op_g.front());
                op_g.pop();
            };
        };
    };
    // f1/f2 are the two join key column names as written in the query
    string f1 = op_g.front();
    op_g.pop();
    string f2 = op_g.front();
    op_g.pop();
    if (verbose)
        cout << "JOIN " << s << " " << f1 << " " << f2 << " " << getFreeMem() << " " << phase_copy << endl;
    std::clock_t start1 = std::clock();
    CudaSet* c = new CudaSet(right, left, op_sel_s, op_sel_s_as);
    // either input empty (and not merely filtered): result is an empty set — publish and bail out
    if ((left->mRecCount == 0 && !left->filtered) || (right->mRecCount == 0 && !right->filtered)) {
        c = new CudaSet(left, right, op_sel_s, op_sel_s_as);
        varNames[res_name] = c;
        clean_queues();
        return;
    };
    // intermediate results of a chain are temporary tables
    if(join_tab_cnt > 1 && tab < join_tab_cnt)
        c->tmp_table = 1;
    else
        c->tmp_table = 0;
    // work out which input each key column belongs to; swap f1/f2 if needed so that
    // colname1/f1 always refer to the left table and colname2/f2 to the right
    string colname1, colname2;
    string tmpstr;
    if (std::find(left->columnNames.begin(), left->columnNames.end(), f1) != left->columnNames.end()) {
        colname1 = f1;
        if (std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
            colname2 = f2;
        }
        else {
            process_error(2, "Couldn't find column " + f2 );
        };
    }
    else if (std::find(right->columnNames.begin(), right->columnNames.end(), f1) != right->columnNames.end()) {
        colname2 = f1;
        tmpstr = f1;
        f1 = f2;
        if (std::find(left->columnNames.begin(), left->columnNames.end(), f2) != left->columnNames.end()) {
            colname1 = f2;
            f2 = tmpstr;
        }
        else {
            process_error(2, "Couldn't find column " +f2 );
        };
    }
    else {
        process_error(2, "Couldn't find column " + f1);
    };
    // only int/int, char/char, or decimal/decimal keys are joinable; plain floats are rejected
    if (!((left->type[colname1] == 0 && right->type[colname2] == 0) || (left->type[colname1] == 2 && right->type[colname2] == 2)
            || (left->type[colname1] == 1 && right->type[colname2] == 1 && left->decimal[colname1] && right->decimal[colname2]))) {
        process_error(2, "Joins on floats are not supported ");
    };
    //bool decimal_join = 0;
    //if (left->type[colname1] == 1 && right->type[colname2] == 1)
    //    decimal_join = 1;
    queue<string> op_vd(op_g);
    queue<string> op_g1(op_g);
    queue<string> op_alt(op_sel);
    // append the extra AND-ed join columns of this step to the right-side load list
    unsigned int jc = join_and_cnt[join_tab_cnt - tab];
    while(jc) {
        jc--;
        op_vd.pop();
        op_alt.push(op_vd.front());
        op_vd.pop();
    };
    size_t rcount = 0, cnt_r;
    queue<string> cc;
    if (left->type[colname1] == 2) {
        // char keys: the dictionary-encoded int column will be (re)built, start from an empty device vector
        left->d_columns_int[colname1] = thrust::device_vector<int_type>();
    }
    else {
        cc.push(f1);
        allocColumns(left, cc);
    };
    left->hostRecCount = left->mRecCount;
    size_t cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
    queue<string> lc(cc);
    // v_l holds the permutation produced by sorting the left key column of a segment
    thrust::device_vector<unsigned int> v_l(left->maxRecs);
    //MGPU_MEM(int) aIndicesDevice, bIndicesDevice, intersectionDevice;
    stack<string> exe_type;
    set<string> field_names;
    exe_type.push(f2);
    // right-side columns that must be loaded: everything that appears in the result,
    // the key itself, and (when there are extra AND columns) all of them
    for(unsigned int i = 0; i < right->columnNames.size(); i++) {
        if (std::find(c->columnNames.begin(), c->columnNames.end(), right->columnNames[i]) != c->columnNames.end()
                || right->columnNames[i] == f2 || join_and_cnt[join_tab_cnt - tab]) {
            field_names.insert(right->columnNames[i]);
        };
    };
    thrust::device_vector<int> p_tmp;
    unsigned int start_part = 0;
    bool prejoin = 0;
    // outer loop: bring the right table to the GPU either one segment at a time
    // (not compressed, or insufficient free memory) or all remaining segments at once
    while(start_part < right->segCount) {
        right->deAllocOnDevice();
        std::clock_t start12 = std::clock();
        if(right->not_compressed || (!right->filtered && getFreeMem() < right->columnNames.size()*right->hostRecCount*8*2)) {
            cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, start_part+1);
            start_part = start_part+1;
        }
        else {
            cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, right->segCount);
            start_part = right->segCount;
            for(unsigned int i=0; i < right->columnNames.size(); i++) {
                if (right->type[right->columnNames[i]] != 1) {
                    right->d_columns_int[right->columnNames[i]].shrink_to_fit();
                }
                else
                    right->d_columns_float[right->columnNames[i]].shrink_to_fit();
            };
        };
        right->mRecCount = cnt_r;
        // the right side must be sorted on the key column; sort_check '1' marks keys that
        // are exactly the dense sequence 1..N (first element 1, last element == size)
        bool order = 1;
        if(!right->presorted_fields.empty() && right->presorted_fields.front() == f2) {
            order = 0;
            //cout << "No need to sort " << endl;
            if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size())
                right->sort_check = '1';
            else {
                right->sort_check = '0';
            };
        };
        if(order) {
            if(thrust::is_sorted(right->d_columns_int[f2].begin(), right->d_columns_int[f2].end())) {
                if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) {
                    right->sort_check = '1';
                }
                else {
                    right->sort_check = '0';
                };
            }
            else {
                //cout << "sorting " << endl;
                size_t tot_size = right->mRecCount*8*right->columnNames.size();
                if (getFreeMem() > tot_size*1.5) {
                    // enough GPU memory: sort in place on the device
                    order_inplace(right, exe_type, field_names, 0);
                }
                else {
                    // not enough GPU memory: round-trip the columns to the host, sort there, copy back
                    for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
                        //cout << "sorting " << *it << endl;
                        if(right->type[*it] != 1) {
                            if(right->h_columns_int[*it].size() < right->mRecCount)
                                right->h_columns_int[*it].resize(right->mRecCount);
                            thrust::copy(right->d_columns_int[*it].begin(), right->d_columns_int[*it].begin() + right->mRecCount, right->h_columns_int[*it].begin());
                        }
                        else {
                            if(right->type[*it] == 1) {
                                if(right->h_columns_float[*it].size() < right->mRecCount)
                                    right->h_columns_float[*it].resize(right->mRecCount);
                            };
                            thrust::copy(right->d_columns_float[*it].begin(), right->d_columns_float[*it].begin() + right->mRecCount, right->h_columns_float[*it].begin());
                        };
                    };
                    order_inplace_host(right, exe_type, field_names, 0);
                    for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
                        if(right->type[*it] != 1)
                            thrust::copy(right->h_columns_int[*it].begin(), right->h_columns_int[*it].begin() + right->mRecCount, right->d_columns_int[*it].begin());
                        else
                            thrust::copy(right->h_columns_float[*it].begin(), right->h_columns_float[*it].begin() + right->mRecCount, right->d_columns_float[*it].begin());
                    };
                };
            };
        };
        //std::cout<< "join right load time " << ( ( std::clock() - start12 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
        int e_segment;
if(end_segment == -1) {
            e_segment = left->segCount;
        }
        else
            e_segment = end_segment;
        // inner loop: process the left table segment by segment against the loaded right part
        for (unsigned int i = start_segment; i < e_segment; i++) {
            if(verbose)
                //cout << "segment " << i << '\xd';
                cout << "segment " << i << endl;
            cnt_l = 0;
            copyColumns(left, lc, i, cnt_l);
            cnt_l = left->mRecCount;
            auto join_eq_type1(join_eq_type);
            if (cnt_l) {
                // sort the left index column, save the permutation vector, it might be needed later
                thrust::device_ptr<int_type> d_col((int_type*)thrust::raw_pointer_cast(left->d_columns_int[colname1].data()));
                thrust::sequence(v_l.begin(), v_l.begin() + cnt_l,0,1);
                bool do_sort = 1;
                if(!left->sorted_fields.empty()) {
                    if(left->sorted_fields.front() == f1) {
                        do_sort = 0;
                    };
                }
                else if(!left->presorted_fields.empty()) {
                    if(left->presorted_fields.front() == f1) {
                        do_sort = 0;
                    };
                };
                if(do_sort) {
                    thrust::sort_by_key(d_col, d_col + cnt_l, v_l.begin());
                }
                else if(verbose)
                    cout << "No need of sorting " << endl;
                if(prejoin) {
                    // disabled legacy mgpu pre-intersection:
                    //res_count = SetOpKeys<MgpuSetOpIntersection, true>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
                    //                                                   thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
                    //                                                   &intersectionDevice, *context, false);
                    //if(!res_count)
                    //    continue;
                };
                // key ranges of the two (sorted) sides do not overlap — this left segment cannot match
                if (left->d_columns_int[colname1][0] > right->d_columns_int[colname2][cnt_r-1]
                        || left->d_columns_int[colname1][cnt_l-1] < right->d_columns_int[colname2][0]) {
                    if(verbose)
                        cout << endl << "skipping after copying " << endl;
                    continue;
                };
                //else cout << "JOINING " << left min/max << " AND " << right min/max << endl;
                char join_kind = join_type.front();
                std::clock_t start11 = std::clock();
                mem_t<int2> res;
                // moderngpu sort-merge inner join: produces (left index, right index) pairs.
                // Semi/anti kinds ('1'..'4') start from the inner-join pairs and are refined below.
                if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') {
                    //res_count = RelationalJoin<MgpuJoinKindInner>(...legacy mgpu path...);
                    res = inner_join(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
                                     thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
                                     less_t<int_type>(), context);
                };
                res_count = res.size();
                /* disabled legacy mgpu paths:
                   else if(join_kind == 'L') res_count = RelationalJoin<MgpuJoinKindLeft>(...);
                   else if(join_kind == 'R') res_count = RelationalJoin<MgpuJoinKindRight>(...);
                   else if(join_kind == 'O') res_count = RelationalJoin<MgpuJoinKindOuter>(...);
                */
                if(verbose)
                    std::cout<< "join time " << ( ( std::clock() - start11 ) / (double)CLOCKS_PER_SEC ) << '\n';
                if(verbose)
                    cout << "RES " << res_count << endl;
                if(res_count == 0)
                    prejoin = 1;
                // split the int2 pairs into two separate index arrays (left -> d_res1, right -> d_res2)
                thrust::device_ptr<int> d_res1 = thrust::device_malloc<int>(res_count);
                thrust::device_ptr<int> d_res2 = thrust::device_malloc<int>(res_count);
                thrust::counting_iterator<unsigned int> begin(0);
                split_int2 ff(thrust::raw_pointer_cast(d_res1), thrust::raw_pointer_cast(d_res2), res.data());
                thrust::for_each(begin, begin + res_count, ff);
                if(res_count) {
                    // map the sorted-left positions back through the sort permutation v_l
                    // NOTE(review): sequence(...,-1) fills -1,0,1,...; a fill(-1) default for
                    // gather_if's unmatched slots looks like the intent — verify.
                    p_tmp.resize(res_count);
                    thrust::sequence(p_tmp.begin(), p_tmp.end(),-1);
                    thrust::gather_if(d_res1, d_res1+res_count, d_res1, v_l.begin(), p_tmp.begin(), _1 >= 0);
                };
                // check if the join is a multicolumn join: apply each extra AND-ed column pair as a filter
                unsigned int mul_cnt = join_and_cnt[join_tab_cnt - tab];
                while(mul_cnt) {
                    mul_cnt--;
                    queue<string> mult(op_g);
                    string f3 = mult.front();
                    mult.pop();
                    string f4 = mult.front();
                    mult.pop();
                    //cout << "ADDITIONAL COL JOIN " << f3 << " " << f4 << " " << join_eq_type.front() << endl;
                    queue<string> rc;
                    rc.push(f3);
                    allocColumns(left, rc);
                    size_t offset = 0;
                    copyColumns(left, rc, i, offset, 0, 0);
                    rc.pop();
                    if (res_count) {
                        // d_add[k] = whether pair k also satisfies the extra predicate
                        thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(res_count);
                        if(right->d_columns_int[f4].size() == 0)
                            load_queue(rc, right, f4, rcount, 0, right->segCount, 0, 0);
                        if (left->type[f3] == 1 && right->type[f4] == 1) {
                            thrust::transform(make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.begin()),
                                              make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.end()),
                                              make_permutation_iterator(right->d_columns_float[f4].begin(), d_res2),
                                              d_add, float_equal_to());
                        }
                        else {
                            // join_eq_type 'N' marks a not-equal predicate
                            if(join_eq_type1.front() != 'N')
                                thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
                                                  make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
                                                  make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
                                                  d_add, thrust::equal_to<int_type>());
                            else {
                                thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
                                                  make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
                                                  make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
                                                  d_add, thrust::not_equal_to<int_type>());
                            };
                        };
                        if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') {
                            // result count changes only in case of an inner join
                            unsigned int new_cnt = thrust::count(d_add, d_add+res_count, 1);
                            thrust::stable_partition(d_res2, d_res2 + res_count, d_add, thrust::identity<unsigned int>());
                            thrust::stable_partition(p_tmp.begin(), p_tmp.end(), d_add, thrust::identity<unsigned int>());
                            res_count = new_cnt;
                        }
                        else { // otherwise we consider it a valid left join result with non-nulls on the left side and nulls on the right side
                            thrust::transform(d_res2, d_res2 + res_count, d_add , d_res2, set_minus());
                        };
                        thrust::device_free(d_add);
                    };
                    if(!join_eq_type1.empty())
                        join_eq_type1.pop();
                };
                while(!join_eq_type1.empty())
                    join_eq_type1.pop();
                //cout << "MUL res_count " << res_count << endl;
                if(join_kind == '1') { // LEFT SEMI: keep each matching left row once
                    thrust::sort(p_tmp.begin(), p_tmp.begin() + res_count);
                    auto new_end = thrust::unique(p_tmp.begin(), p_tmp.begin() + res_count);
                    res_count = new_end - p_tmp.begin();
                }
                else if(join_kind == '2') { // RIGHT SEMI: accumulate distinct matching right rows in ranj
                    thrust::sort(d_res2, d_res2 + res_count);
                    auto new_end = thrust::unique(d_res2, d_res2 + res_count);
                    res_count = new_end - d_res2;
                    auto old_sz = ranj.size();
                    ranj.resize(ranj.size() + res_count);
                    thrust::copy(d_res2, d_res2 + res_count, ranj.begin() + old_sz);
                    thrust::sort(ranj.begin(), ranj.end());
                    auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
                    ranj.resize(ra_cnt-ranj.begin());
                }
                else if(join_kind == '3') { // ANTI JOIN LEFT: left rows with no match
                    thrust::counting_iterator<int> iter(0);
                    thrust::device_vector<int> rr(cnt_l);
                    auto new_end = thrust::set_difference(iter, iter+cnt_l, p_tmp.begin(), p_tmp.begin() + res_count, rr.begin());
                    res_count = new_end - rr.begin();
                    thrust::copy(rr.begin(), new_end, p_tmp.begin());
                }
                else if(join_kind == '4') { // ANTI JOIN RIGHT: accumulate matched right rows in ranj; complement taken at the end
                    thrust::sort(d_res2, d_res2 + res_count);
                    auto new_end = thrust::unique(d_res2, d_res2 + res_count);
                    auto cnt = new_end - d_res2;
                    thrust::device_vector<int> seq(cnt + ranj.size());
                    //auto new_end = thrust::set_difference(seq.begin(), seq.end(), d_res2, d_res2 + res_count, rr.begin());
                    auto new_end1 = thrust::set_union(d_res2, d_res2 + cnt, ranj.begin(), ranj.end(), seq.begin());
                    auto s_cnt = new_end1 - seq.begin();
                    thrust::sort(seq.begin(),
seq.begin() + s_cnt);
                    auto end_seq = thrust::unique(seq.begin(), seq.begin() + s_cnt);
                    auto u_cnt = end_seq - seq.begin();
                    ranj.resize(u_cnt);
                    thrust::copy(seq.begin(), seq.begin() + u_cnt, ranj.begin());
                    thrust::sort(ranj.begin(), ranj.end());
                    auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
                    ranj.resize(ra_cnt-ranj.begin());
                }
                tot_count = tot_count + res_count;
                //cout << "tot " << tot_count << endl;
                //std::clock_t start12 = std::clock();
                // materialize the result rows of this segment into c's host columns
                // (right semi '2' and right anti '4' are materialized once, after all segments)
                if(res_count && join_kind != '4' && join_kind != '2') {
                    offset = c->mRecCount;
                    queue<string> op_sel1(op_sel_s);
                    c->resize_join(res_count);
                    if(scratch.size() < res_count*int_size)
                        scratch.resize(res_count*int_size);
                    thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
                    std::map<string,bool> processed;
                    while(!op_sel1.empty()) {
                        // each output column only once
                        if (processed.find(op_sel1.front()) != processed.end()) {
                            op_sel1.pop();
                            continue;
                        }
                        else
                            processed[op_sel1.front()] = 1;
                        while(!cc.empty())
                            cc.pop();
                        cc.push(op_sel1.front());
                        if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end() && join_kind != '2') {
                            allocColumns(left, cc);
                            copyColumns(left, cc, i, k, 0, 0);
                            // gather left-side values through p_tmp, stage in scratch, copy to host
                            if(left->type[op_sel1.front()] != 1 ) {
                                thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
                                thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_int[op_sel1.front()].begin(), d_tmp);
                                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
                            }
                            else {
                                thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
                                thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_float[op_sel1.front()].begin(), d_tmp);
                                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
                            };
                            if(op_sel1.front() != colname1)
                                left->deAllocColumnOnDevice(op_sel1.front());
                            //};
                        }
                        else if(std::find(right->columnNames.begin(), right->columnNames.end(), op_sel1.front()) != right->columnNames.end()) {
                            // gather right-side values through the right index array d_res2
                            if(right->type[op_sel1.front()] != 1) {
                                thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
                                thrust::gather(d_res2, d_res2 + res_count, right->d_columns_int[op_sel1.front()].begin(), d_tmp);
                                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
                            }
                            else {
                                thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
                                thrust::gather(d_res2, d_res2 + res_count, right->d_columns_float[op_sel1.front()].begin(), d_tmp);
                                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
                            }
                        }
                        else {
                        };
                        op_sel1.pop();
                    };
                };
                thrust::device_free(d_res1);
                thrust::device_free(d_res2);
            };
        };
        // finalize right anti join: result = all right rows minus the accumulated matches in ranj
        if(join_type.front() == '4') {
            thrust::device_vector<int> st(cnt_r);
            thrust::sequence(st.begin(), st.end(),0,1);
            thrust::device_vector<int> r(cnt_r);
            auto new_end = thrust::set_difference(st.begin(), st.end(), ranj.begin(), ranj.end(), r.begin());
            ranj.resize(0);
            res_count = new_end - r.begin();
            tot_count = res_count;
            queue<string> op_sel1(op_sel_s);
            c->resize_join(res_count);
            if(scratch.size() < res_count*int_size)
                scratch.resize(res_count*int_size);
            thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
            std::map<string,bool> processed;
            while(!op_sel1.empty()) {
                if (processed.find(op_sel1.front()) != processed.end()) {
                    op_sel1.pop();
                    continue;
                }
                else
                    processed[op_sel1.front()] = 1;
                while(!cc.empty())
                    cc.pop();
                cc.push(op_sel1.front());
                thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
                thrust::gather(r.begin(), r.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
                op_sel1.pop();
            };
        }
        // finalize right semi join: result = the accumulated distinct matches in ranj
        else if(join_type.front() == '2') {
            res_count = ranj.size();
            tot_count = res_count;
            queue<string> op_sel1(op_sel_s);
            c->resize_join(res_count);
            if(scratch.size() < res_count*int_size)
                scratch.resize(res_count*int_size);
            thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
            std::map<string,bool> processed;
            while(!op_sel1.empty()) {
                if (processed.find(op_sel1.front()) != processed.end()) {
                    op_sel1.pop();
                    continue;
                }
                else
                    processed[op_sel1.front()] = 1;
                while(!cc.empty())
                    cc.pop();
                cc.push(op_sel1.front());
                thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
                thrust::gather(ranj.begin(), ranj.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
                thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
                op_sel1.pop();
            };
            ranj.resize(0);
        };
    };
    // publish the result set and free inputs whose last use was this statement
    left->deAllocOnDevice();
    right->deAllocOnDevice();
    c->deAllocOnDevice();
    varNames[s] = c;
    c->mRecCount = tot_count;
    c->hostRecCount = tot_count;
    c->name = s;
    if(verbose)
        cout << "tot res " << tot_count << " " << getFreeMem() << endl;
    if(right->tmp_table == 1) {
        right->free();
        varNames.erase(j2);
    }
    else {
        if(mystat[j2] == statement_count) {
            right->free();
            varNames.erase(j2);
        };
    };
    if(mystat[j1] == statement_count) {
        left->free();
        varNames.erase(j1);
    };
    join_type.pop();
    if(!join_eq_type.empty())
        join_eq_type.pop();
    // segment the result according to available GPU memory
    size_t tot_size = tot_count*8*c->columnNames.size();
    if (getFreeMem() > tot_size) {
        c->maxRecs = tot_count;
        c->segCount = 1;
    }
    else {
        c->segCount = ((tot_size)/getFreeMem() + 1);
        c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1);
    };
    if(verbose)
        std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
}

// Sorts CudaSet a on the host (used by emit_order when the data does not fit on the GPU):
// builds a permutation from the sort keys in exe_type/exe_value, then writes the permuted
// columns of a into b's host arrays.
void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value)
{
    unsigned int tot = 0;
    if(!a->not_compressed) { // compressed
        allocColumns(a, names);
        unsigned int c = 0;
        size_t cnt = 0;
        for(unsigned int i = 0; i < a->segCount; i++) {
            copyColumns(a, names, (a->segCount - i) - 1, cnt); // uses segment 1 on a host to copy data from a file to gpu
            if (a->mRecCount) {
                a->CopyToHost((c - tot) -
a->mRecCount, a->mRecCount);
                // NOTE(review): 'c' is initialized to 0 and never incremented, so
                // (c - tot) - a->mRecCount underflows unsigned once tot > 0 — verify the
                // intended destination offset for CopyToHost.
                tot = tot + a->mRecCount;
            };
        };
    }
    else
        tot = a->mRecCount;
    b->resize(tot); // resize host arrays
    a->mRecCount = tot;
    unsigned int* permutation = new unsigned int[a->mRecCount];
    thrust::sequence(permutation, permutation + a->mRecCount);
    size_t maxSize = a->mRecCount;
    char* temp;
    temp = new char[maxSize*max_char(a)];
    // sort on host: fold each key (innermost first, from the stacks) into the permutation
    for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
        if (a->type[exe_type.top()] == 0)
            update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
        else if (a->type[exe_type.top()] == 1)
            update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
        else {
            update_char_permutation(a, exe_type.top(), permutation, exe_value.top(), temp, 1);
        };
    };
    // apply the final permutation, writing into b's host columns
    for (unsigned int i = 0; i < a->mColumnCount; i++) {
        if (a->type[a->columnNames[i]] != 1) {
            apply_permutation_host(a->h_columns_int[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_int[a->columnNames[i]].data());
        }
        else
            apply_permutation_host(a->h_columns_float[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_float[a->columnNames[i]].data());
    };
    delete [] temp;
    delete [] permutation;
}

// Executes ORDER BY: sorts set f by the keys in op_type/op_value into a new set s.
// e is unused here; ll != 0 marks an ORDER BY that is part of a larger statement
// (skips statement counting / first-pass bookkeeping).
// Chooses a GPU in-memory sort when the data fits in half of free GPU memory,
// otherwise falls back to order_on_host.
void emit_order(const char *s, const char *f, const int e, const int ll)
{
    if(ll == 0)
        statement_count++;
    if (scan_state == 0 && ll == 0) {
        // first pass: liveness bookkeeping only
        if (mystat.find(f) == mystat.end() && data_dict.count(f) == 0) {
            process_error(2, "Order : couldn't find variable " + string(f));
        };
        mystat[s] = statement_count;
        mystat[f] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            mystat[filter_var[f]] = statement_count;
        return;
    };
    if (scan_state == 0) {
        check_used_vars();
        return;
    };
    if(varNames.find(f) == varNames.end() ) {
        clean_queues();
        return;
    };
    CudaSet* a = varNames.find(f)->second;
    stack<string> exe_type, exe_value;
    if(verbose)
        cout << "ORDER: " << s << " " << f << endl;
    // collect sort keys and directions; a bare NAME token defaults to ASC
    for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
        if ((op_type.front()).compare("NAME") == 0) {
            exe_type.push(op_value.front());
            exe_value.push("ASC");
        }
        else {
            exe_type.push(op_type.front());
            exe_value.push(op_value.front());
        };
        if(std::find(a->columnNames.begin(), a->columnNames.end(), exe_type.top()) == a->columnNames.end()) {
            process_error(2, "Couldn't find name " + exe_type.top());
        };
    };
    stack<string> tp(exe_type);
    queue<string> op_vx;
    while (!tp.empty()) {
        op_vx.push(tp.top());
        tp.pop();
    };
    queue<string> names;
    for (unsigned int i = 0; i < a->columnNames.size() ; i++ )
        names.push(a->columnNames[i]);
    CudaSet *b = a->copyDeviceStruct();
    // lets find out if our data set fits into a GPU
    size_t mem_available = getFreeMem();
    size_t rec_size = 0;
    for(unsigned int i = 0; i < a->mColumnCount; i++) {
        if(a->type[a->columnNames[i]] == 0)
            rec_size = rec_size + int_size;
        else if(a->type[a->columnNames[i]] == 1)
            rec_size = rec_size + float_size;
        else
            rec_size = rec_size + a->char_size[a->columnNames[i]];
    };
    bool fits;
    if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU
        fits = 0;
    else
        fits = 1;
    if(!fits) {
        order_on_host(a, b, names, exe_type, exe_value);
    }
    else {
        // initialize permutation to [0, 1, 2, ... ,N-1]
        size_t rcount;
        if(a->filtered) {
            CudaSet *t = varNames[a->source_name];
            a->mRecCount = t->mRecCount;
            a->hostRecCount = a->mRecCount;
        };
        a->mRecCount = load_queue(names, a, op_vx.front(), rcount, 0, a->segCount);
        // the shared 'scratch' device buffer backs the permutation array
        if(scratch.size() < a->mRecCount)
            scratch.resize(a->mRecCount*4);
        thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
        thrust::sequence(permutation, permutation+(a->mRecCount));
        unsigned int* perm_ptr = thrust::raw_pointer_cast(permutation);
        void* temp;
        CUDA_SAFE_CALL(cudaMalloc((void **) &temp, a->mRecCount*max_char(a)));
        if(a->filtered)
            varNames[a->source_name]->hostRecCount = varNames[a->source_name]->mRecCount;
        else
            a->hostRecCount = a->mRecCount;;
        if(a->filtered)
            varNames[a->source_name]->mRecCount = varNames[a->source_name]->hostRecCount;
        else
            a->mRecCount = a->hostRecCount;
        // fold each sort key into the permutation (radix-style, innermost key first)
        for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
            if (a->type[exe_type.top()] == 0 && a->string_map.find(exe_type.top()) == a->string_map.end())
                update_permutation(a->d_columns_int[exe_type.top()], perm_ptr, a->mRecCount, exe_value.top(), (int_type*)temp, 64);
            else if (a->type[exe_type.top()] == 1)
                update_permutation(a->d_columns_float[exe_type.top()], perm_ptr, a->mRecCount,exe_value.top(), (float_type*)temp, 64);
            else {
                // get strings to device
                update_char_permutation(a, exe_type.top(), perm_ptr, exe_value.top(), temp, 0);
            };
        };
        b->resize(a->mRecCount); // resize host arrays
        b->mRecCount = a->mRecCount;
        // permute every column in place on the device, then copy down to b's host arrays
        for (unsigned int i = 0; i < a->mColumnCount; i++) {
            if (a->type[a->columnNames[i]] != 1) {
                apply_permutation(a->d_columns_int[a->columnNames[i]], perm_ptr, a->mRecCount, (int_type*)temp, 64);
            }
            else
                apply_permutation(a->d_columns_float[a->columnNames[i]], perm_ptr, a->mRecCount, (float_type*)temp, 64);
        };
        for(unsigned int i = 0; i < a->mColumnCount; i++) {
            if(a->type[a->columnNames[i]] != 1) {
                thrust::copy(a->d_columns_int[a->columnNames[i]].begin(), a->d_columns_int[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_int[a->columnNames[i]].begin());
            }
            else
                thrust::copy(a->d_columns_float[a->columnNames[i]].begin(), a->d_columns_float[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_float[a->columnNames[i]].begin());
        };
        b->deAllocOnDevice();
        a->deAllocOnDevice();
        cudaFree(temp);
    };
    varNames[s] = b;
    b->segCount = 1;
    b->not_compressed = 1;
    b->string_map = a->string_map;
    if(mystat[f] == statement_count && !a->keep) {
        a->free();
        varNames.erase(f);
    };
}

// Executes SELECT (with optional GROUP BY of grp_cnt keys) over set f into a new set s.
// First pass (scan_state == 0): liveness bookkeeping only.
void emit_select(const char *s, const char *f, const int grp_cnt)
{
    statement_count++;
    if (scan_state == 0) {
        if (mystat.find(f) == mystat.end() && data_dict.count(f) == 0) {
            process_error(2, "Select : couldn't find variable " + string(f) );
        };
        mystat[s] = statement_count;
        mystat[f] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            mystat[filter_var[f]] = statement_count;
        check_used_vars();
        clean_queues();
        return;
    };
    if(varNames.find(f) == varNames.end()) {
        clean_queues();
        cout << "Couldn't find1 " << f << endl;
        process_error(2, "Couldn't find(1) " + string(f) );
        return;
    };
    // the last grp_cnt entries of op_value are the GROUP BY keys
    queue<string> op_v1(op_value);
    while(op_v1.size() > grp_cnt)
        op_v1.pop();
    stack<string> op_v2;
    queue<string> op_v3;
    for(int i=0; i < grp_cnt; ++i) {
        op_v2.push(op_v1.front());
        op_v3.push(op_v1.front());
        op_v1.pop();
    };
    CudaSet *a;
    if(varNames.find(f) != varNames.end())
        a = varNames.find(f)->second;
    else {
        process_error(2, "Couldn't find " + string(f) );
    };
    // empty (and unfiltered) input: publish an empty result and return
    if(a->mRecCount == 0 && !a->filtered) {
        CudaSet *c;
        c = new CudaSet(0,1);
        varNames[s] = c;
        c->name = s;
        clean_queues();
        if(verbose)
            cout << "SELECT " << s << " count : 0, Mem " << getFreeMem() << endl;
        return;
    };
    if(verbose)
        cout << "SELECT " << s << " " << f << " " << getFreeMem() << endl;
    std::clock_t start1 = std::clock();
    // here we need to determine the column count and composition
    queue<string> op_v(op_value);
    queue<string> op_vx;
    set<string> field_names;
    set<string> order_field_names;
    map<string,string> aliases;
    string tt;
    while(!op_v.empty()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { tt = op_v.front(); op_v.pop(); if(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end()) { if(aliases.count(tt) == 0) { aliases[tt] = op_v.front(); }; } else { while(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end() && !op_v.empty()) { op_v.pop(); }; }; }; }; if(!op_v.empty()) op_v.pop(); }; op_v = op_value; while(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { field_names.insert(op_v.front()); order_field_names.insert(op_v.front()); }; op_v.pop(); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { op_vx.push(*it); }; // find out how many columns a new set will have queue<string> op_t(op_type); int_type col_count = 0; for(int i=0; !op_t.empty(); ++i, op_t.pop()) if((op_t.front()).compare("emit sel_name") == 0) col_count++; CudaSet *b, *c; if(a->segCount <= 1) setSegments(a, op_vx); allocColumns(a, op_vx); unsigned int cycle_count; if(a->filtered) cycle_count = varNames[a->source_name]->segCount; else cycle_count = a->segCount; size_t ol_count = a->mRecCount, cnt; a->hostRecCount = a->mRecCount; b = new CudaSet(0, col_count); b->name = "tmp b in select"; bool c_set = 0; //size_t tmp_size = a->mRecCount; //if(a->segCount > 1) // tmp_size = a->maxRecs; vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_tmp; /* for(unsigned int i = 0; i < distinct_cnt; i++) { distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size)); distinct_val.push_back(thrust::device_vector<int_type>()); distinct_hash.push_back(thrust::device_vector<int_type>()); }; */ bool one_liner; if (grp_cnt != 0) 
phase_copy = 1; for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE if(verbose) cout << "segment " << i << " select mem " << getFreeMem() << endl; std::clock_t start3 = std::clock(); cnt = 0; copyColumns(a, op_vx, i, cnt); if(a->mRecCount) { if (grp_cnt != 0) { make_calc_columns(op_type, op_value, a, order_field_names); bool not_srt_and_eq = 0; stack<string> op_vv(op_v2); while(!op_vv.empty()) { if(!min_max_eq[op_vv.top()]) not_srt_and_eq = 1; op_vv.pop(); }; if(not_srt_and_eq) { order_inplace(a, op_v2, order_field_names, 1); a->GroupBy(op_v2); } else { if(a->grp.size() != 1) a->grp.resize(1); a->grp[0] = 1; a->grp_count = 1; }; } else a->grp_count = 0; queue<string> op_vx1; for (auto it=order_field_names.begin(); it!=order_field_names.end(); ++it) { op_vx1.push(*it); }; copyFinalize(a, op_vx1,0); one_liner = select(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a,b, distinct_tmp); if(i == 0) std::reverse(b->columnNames.begin(), b->columnNames.end()); if (!c_set && b->mRecCount > 0) { c = new CudaSet(0, col_count); create_c(c,b); c_set = 1; c->name = s; }; if (grp_cnt && cycle_count > 1 && b->mRecCount > 0) { add(c,b,op_v3, aliases, distinct_tmp, distinct_val, distinct_hash, a); } else { //copy b to c unsigned int c_offset = c->mRecCount; c->resize(b->mRecCount); for(unsigned int j=0; j < b->columnNames.size(); j++) { if (b->type[b->columnNames[j]] == 0) { thrust::copy(b->d_columns_int[b->columnNames[j]].begin(), b->d_columns_int[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_int[b->columnNames[j]].begin() + c_offset); } else if (b->type[b->columnNames[j]] == 1) { thrust::copy(b->d_columns_float[b->columnNames[j]].begin(), b->d_columns_float[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_float[b->columnNames[j]].begin() + c_offset); }; }; }; //std::cout<< "add time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << '\n'; }; std::cout<< "cycle sel time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << " " 
<< getFreeMem() << '\n'; }; phase_copy = 0; a->mRecCount = ol_count; a->mRecCount = a->hostRecCount; a->deAllocOnDevice(); b->deAllocOnDevice(); a->grp.resize(0); a->grp.shrink_to_fit(); for(auto i = 0; i < alloced_mem.size(); i++) { cudaFree(alloced_mem[i]); alloced_mem.pop_back(); }; if(!c_set) { CudaSet *c; c = new CudaSet(0,1); varNames[s] = c; c->name = s; clean_queues(); return; }; if (grp_cnt) { count_avg(c, distinct_hash); } else { if(one_liner) { count_simple(c); }; }; c->maxRecs = c->mRecCount; c->hostRecCount = c->mRecCount; c->string_map = b->string_map; c->name = s; c->keep = 1; if(verbose) cout << "select res " << c->mRecCount << endl; size_t tot_size = c->maxRecs*8*c->columnNames.size(); if (getFreeMem() < tot_size*3) { c->segCount = ((tot_size*3)/getFreeMem() + 1); c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1); }; clean_queues(); varNames[s] = c; b->free(); varNames[s]->keep = 1; if(mystat[s] == statement_count) { varNames[s]->free(); varNames.erase(s); }; if(mystat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; if(verbose) std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n'; } void emit_insert(const char *f, const char* s) { statement_count++; if (scan_state == 0) { if (mystat.find(f) == mystat.end() && data_dict.count(f) == 0) { process_error(2, "Insert : couldn't find variable " + string(f)); }; if (mystat.find(s) == mystat.end() && data_dict.count(s) == 0) { process_error(2, "Insert : couldn't find variable " + string(s) ); }; check_used_vars(); mystat[f] = statement_count; mystat[s] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end() || varNames.find(s) == varNames.end()) { clean_queues(); return; }; if(verbose) cout << "INSERT " << f << " " << s << endl; insert_records(f,s); clean_queues(); }; void emit_delete(const char *f) { statement_count++; if (scan_state == 0) { if (mystat.find(f) == mystat.end() && 
data_dict.count(f) == 0) { process_error(2, "Delete : couldn't find variable " + string(f)); }; mystat[f] = statement_count; check_used_vars(); clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; delete_records(f); cout << "DELETE " << f << endl; clean_queues(); } void emit_case() { op_case = 1; if (scan_state == 1) cout << "emit case " << endl; //extract releveant values and pass to modified filter // get a bool vector back /* while(!op_type.empty()) { cout << "CASE type " << op_type.front() << endl; op_type.pop(); } */ } void emit_create_index(const char *index_name, const char *table, const char *column) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".key"; f = fopen(s3.c_str(), "w"); fputs(column,f); fclose(f); }; } void emit_create_interval(const char *interval_name, const char *table, const char *lcolumn, const char *rcolumn) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".interval"; f = fopen(s3.c_str(), "w"); fputs(lcolumn,f); fputc('|',f); fputs(rcolumn,f); fclose(f); }; } void emit_create_bitmap_index(const char *index_name, const char *ltable, const char *rtable, const char *rcolumn, const char *lid, const char *rid) { statement_count++; if (scan_state == 0) { emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); mystat[rtable] = std::numeric_limits<unsigned int>::max(); mystat[ltable] = std::numeric_limits<unsigned int>::max(); } else { cout << ltable << " " << rtable << " " << rid << " " << lid << endl; emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); if(varNames.find(ltable) == varNames.end()) cout << "Couldn't find " << ltable << endl; if(varNames.find(rtable) == varNames.end()) cout << "Couldn't find " << rtable << endl; CudaSet* left = varNames.find(ltable)->second; CudaSet* right = varNames.find(rtable)->second; queue<string> op_vx; op_vx.push(rcolumn);op_vx.push(rid); 
allocColumns(right, op_vx); right->CopyColumnToGpu(rid, 0, 0); right->CopyColumnToGpu(rcolumn, 0, 0); op_vx.pop();op_vx.pop(); op_vx.push(lid); allocColumns(left, op_vx); for(int i = 0; i < left->segCount; i++) { left->CopyColumnToGpu(lid, i, 0); thrust::device_vector<unsigned int> output(left->mRecCount); thrust::lower_bound(right->d_columns_int[rid].begin(), right->d_columns_int[rid].begin() + right->mRecCount, left->d_columns_int[lid].begin(), left->d_columns_int[lid].begin() + left->mRecCount, output.begin()); string str = std::string(ltable) + std::string(".") + std::string(rtable) + std::string(".") + std::string(rcolumn) + std::string(".") + to_string(i); thrust::device_vector<int_type> res(left->mRecCount); thrust::host_vector<int_type> res_h(left->mRecCount); if(right->type[rcolumn] == 0) { thrust::gather(output.begin(), output.begin() + left->mRecCount, right->d_columns_int[rcolumn].begin() , res.begin()); thrust::copy(res.begin(), res.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); } else if(right->type[rcolumn] == 1) { } else { //strings string f1 = right->load_file_name + "." + rcolumn + ".0.hash"; //need to change it in case if there are dimensions tables larger than 1 segment ? 
FILE* f = fopen(f1.c_str(), "rb" ); unsigned int cnt; fread(&cnt, 4, 1, f); if(res_h.size() < cnt) res_h.resize(cnt); if(res.size() < cnt) res.resize(cnt); fread(res_h.data(), cnt*8, 1, f); res = res_h; fclose(f); thrust::device_vector<int_type> output1(left->mRecCount); thrust::gather(output.begin(), output.begin() + left->mRecCount , res.begin(), output1.begin()); thrust::copy(output1.begin(), output1.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); }; }; }; } void emit_display(const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (mystat.find(f) == mystat.end() && data_dict.count(f) == 0) { process_error(2, "Filter : couldn't find variable " + string(f) ); }; mystat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) mystat[filter_var[f]] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; CudaSet* a = varNames.find(f)->second; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Display(limit, 0, 1); clean_queues(); if(mystat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; } void emit_filter(char *s, char *f) { statement_count++; if (scan_state == 0) { if (mystat.find(f) == mystat.end() && data_dict.count(f) == 0) { process_error(1, "Filter : couldn't find variable " + string(f)); }; mystat[s] = statement_count; mystat[f] = statement_count; filter_var[s] = f; // check possible use of other variables in filters queue<string> op(op_value); while(!op.empty()) { size_t pos1 = op.front().find_first_of(".", 0); if(pos1 != string::npos) { mystat[op.front().substr(0,pos1)] = statement_count; }; op.pop(); }; check_used_vars(); clean_queues(); return; }; CudaSet *a, *b; a = varNames.find(f)->second; a->name = f; if(a->mRecCount == 0 && !a->filtered) { b = new CudaSet(0,1); } else { if(verbose) cout << "INLINE FILTER " << f << endl; b = a->copyDeviceStruct(); b->name = s; b->sorted_fields = 
a->sorted_fields; b->presorted_fields = a->presorted_fields; //save the stack b->fil_s = s; b->fil_f = f; b->fil_type = op_type; b->fil_value = op_value; b->fil_nums = op_nums; b->fil_nums_f = op_nums_f; b->fil_nums_precision = op_nums_precision; b->filtered = 1; b->tmp_table = a->tmp_table; b->string_map = a->string_map; if(a->filtered) { b->source_name = a->source_name; b->fil_f = a->fil_f; while(!a->fil_value.empty()) { b->fil_value.push(a->fil_value.front()); a->fil_value.pop(); }; while(!a->fil_type.empty()) { b->fil_type.push(a->fil_type.front()); a->fil_type.pop(); }; b->fil_type.push("AND"); while(!a->fil_nums.empty()) { b->fil_nums.push(a->fil_nums.front()); a->fil_nums.pop(); }; while(!a->fil_nums_precision.empty()) { b->fil_nums_precision.push(a->fil_nums_precision.front()); a->fil_nums_precision.pop(); }; while(!a->fil_nums_f.empty()) { b->fil_nums_f.push(a->fil_nums_f.front()); a->fil_nums_f.pop(); }; a->filtered = 0; varNames.erase(f); } else b->source_name = f; b->maxRecs = a->maxRecs; b->prm_d.resize(a->maxRecs); }; b->hostRecCount = a->hostRecCount; clean_queues(); if (varNames.count(s) > 0) varNames[s]->free(); varNames[s] = b; if(mystat[s] == statement_count) { b->free(); varNames.erase(s); }; } void emit_store(const char *s, const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (mystat.find(s) == mystat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s) ); }; mystat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) mystat[filter_var[f]] = statement_count; clean_queues(); return; }; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(verbose) cout << "STORE: " << s << " " << f << " " << sep << endl; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Store(f,sep, limit, 0, 0); if(mystat[s] == statement_count && a->keep == 0) { a->free(); varNames.erase(s); }; }; void emit_store_binary(const 
char *s, const char *f, const bool append) { statement_count++; if (scan_state == 0) { if (mystat.find(s) == mystat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s)); }; mystat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) mystat[filter_var[f]] = statement_count; clean_queues(); return; }; cout << "Append " << append << endl; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(mystat[f] == statement_count) a->deAllocOnDevice(); printf("STORE: %s %s \n", s, f); int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; total_count = 0; total_segments = 0; a->maxRecs = 0; if(fact_file_loaded) { a->Store(f,"", limit, 1, append); } else { FILE* file_p; if(a->text_source) { file_p = fopen(a->load_file_name.c_str(), "rb"); if (!file_p) { process_error(2, "Could not open file " + a->load_file_name ); }; }; thrust::device_vector<char> d_readbuff; thrust::device_vector<char*> dest(a->mColumnCount); thrust::device_vector<unsigned int> ind(a->mColumnCount); thrust::device_vector<unsigned int> dest_len(a->mColumnCount); while(!fact_file_loaded) { if(verbose) cout << "LOADING " << a->load_file_name << " mem: " << getFreeMem() << endl; if(a->text_source) fact_file_loaded = a->LoadBigFile(file_p, d_readbuff, dest, ind, dest_len); if(a->maxRecs < a->mRecCount) a->maxRecs = a->mRecCount; a->Store(f,"", limit, 1, append); }; }; a->writeSortHeader(f); if(mystat[f] == statement_count && !a->keep) { a->free(); varNames.erase(s); }; }; void emit_load_binary(const char *s, const char *f, const int d) { statement_count++; if (scan_state == 0) { mystat[s] = statement_count; return; }; if(verbose) printf("BINARY LOAD: %s %s \n", s, f); std::clock_t start1 = std::clock(); CudaSet *a; unsigned int segCount, maxRecs; string f1(f); f1 += "." 
+ namevars.front() + ".header"; FILE* ff = fopen(f1.c_str(), "rb"); if(!ff) { process_error(2, "Couldn't open file " + f1); }; size_t totRecs; fread((char *)&totRecs, 8, 1, ff); fread((char *)&segCount, 4, 1, ff); fread((char *)&maxRecs, 4, 1, ff); fclose(ff); if(verbose) cout << "Reading " << totRecs << " records" << endl; a = new CudaSet(namevars, typevars, sizevars, cols, totRecs, f, maxRecs); a->segCount = segCount; a->keep = true; a->name = s; varNames[s] = a; if(mystat[s] == statement_count ) { a->free(); varNames.erase(s); }; std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; } void emit_load(const char *s, const char *f, const int d, const char* sep) { statement_count++; if (scan_state == 0) { mystat[s] = statement_count; return; }; printf("LOAD: %s %s %d %s \n", s, f, d, sep); CudaSet *a; a = new CudaSet(namevars, typevars, sizevars, cols, process_count); a->keep = true; a->not_compressed = 1; a->load_file_name = f; a->separator = sep; varNames[s] = a; fact_file_loaded = 0; if(mystat[s] == statement_count) { a->free(); varNames.erase(s); }; } void emit_show_tables() { if (scan_state == 1) { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { cout << (*it).first << endl; }; }; return; } void emit_drop_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) { int seg = 0; string f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); while(!remove(f_name.c_str())) { seg++; f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); }; f_name = (*iter).first + "." 
+ (*it).first + ".header"; remove(f_name.c_str()); }; }; string s_name = (*iter).first + ".presort"; remove(s_name.c_str()); s_name = (*iter).first + ".sort"; remove(s_name.c_str()); if(data_dict.find(table_name) != data_dict.end()) { data_dict.erase(table_name); }; save_dict = 1; }; return; } void emit_describe_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for (auto it=s.begin() ; it != s.end(); ++it ) { if ((*it).second.col_type == 0) { if((*it).second.col_length) { if((*it).second.col_length != UINT_MAX) cout << (*it).first << " decimal with precision of " << (*it).second.col_length << endl; else cout << (*it).first << " timestamp" << endl; } else cout << (*it).first << " integer" << endl; } else if ((*it).second.col_type == 1) { cout << (*it).first << " float" << endl; } else if ((*it).second.col_type == 3) { cout << (*it).first << " decimal" << endl; } else { cout << (*it).first << " char(" << (*it).second.col_length << ")" << endl; }; }; }; }; return; } void yyerror(char *s, ...) 
{ extern int yylineno; extern char *yytext; fprintf(stderr, "%d: error: ", yylineno); cout << yytext << endl; error_cb(1, s); } void clean_queues() { while(!op_type.empty()) op_type.pop(); while(!op_value.empty()) op_value.pop(); while(!op_join.empty()) op_join.pop(); while(!op_nums.empty()) op_nums.pop(); while(!op_nums_f.empty()) op_nums_f.pop(); while(!op_nums_precision.empty()) op_nums_precision.pop(); while(!j_col_count.empty()) j_col_count.pop(); while(!namevars.empty()) namevars.pop(); while(!typevars.empty()) typevars.pop(); while(!sizevars.empty()) sizevars.pop(); while(!cols.empty()) cols.pop(); while(!op_sort.empty()) op_sort.pop(); while(!op_presort.empty()) op_presort.pop(); while(!join_type.empty()) join_type.pop(); while(!join_eq_type.empty()) join_eq_type.pop(); op_case = 0; sel_count = 0; join_cnt = 0; join_col_cnt = 0; distinct_cnt = 0; join_tab_cnt = 0; tab_cnt = 0; join_and_cnt.clear(); } void load_vars() { if(used_vars.size() == 0) { //cout << "Error, no valid column names have been found " << endl; //exit(0); } else { for (auto it=used_vars.begin(); it != used_vars.end(); ++it ) { while(!namevars.empty()) namevars.pop(); while(!typevars.empty()) typevars.pop(); while(!sizevars.empty()) sizevars.pop(); while(!cols.empty()) cols.pop(); if(mystat.count((*it).first) != 0) { auto c = (*it).second; for (auto sit=c.begin() ; sit != c.end(); ++sit ) { //cout << "name " << (*sit).first << " " << data_dict[(*it).first][(*sit).first].col_length << endl; namevars.push((*sit).first); if(data_dict[(*it).first][(*sit).first].col_type == 0) { if(data_dict[(*it).first][(*sit).first].col_length == 0) { typevars.push("int"); } else { if(data_dict[(*it).first][(*sit).first].col_length == UINT_MAX) typevars.push("timestamp"); else typevars.push("decimal"); } } else if(data_dict[(*it).first][(*sit).first].col_type == 1) typevars.push("float"); else typevars.push("char"); sizevars.push(data_dict[(*it).first][(*sit).first].col_length); cols.push(0); }; 
emit_load_binary((*it).first.c_str(), (*it).first.c_str(), 0); }; }; }; } void process_error(int severity, string err) { switch (severity) { case 1: err = "(Warning) " + err; break; case 2: err = "(Fatal) " + err; break; default: err = "(Aborting) " + err; break; } error_cb(severity, err.c_str()); // send the error to the c based callback } void alenkaInit(char ** av) { process_count = 1000000000; verbose = 0; scan_state = 1; statement_count = 0; clean_queues(); //context = CreateCudaDevice(0, nullptr, true); } void alenkaClose() { statement_count = 0; if(alloced_sz) { cudaFree(alloced_tmp); alloced_sz = 0; }; }
the_stack
#include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/lists/detail/scatter.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/scatter.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/uninitialized_fill.h> namespace cudf { namespace detail { /** * @brief Convert a scatter map into a gather map. * * The caller is expected to use the output map on a subsequent gather_bitmask() * function using the PASSTHROUGH op since the resulting map may contain index * values outside the target's range. * * First, the gather-map is initialized with an invalid index. * The value `numeric_limits::lowest()` is used since it should always be outside the target size. * Then, `output[scatter_map[i]] = i` for each `i`. * * @tparam MapIterator Iterator type of the input scatter map. * @param scatter_map_begin Beginning of scatter map. * @param scatter_map_end End of the scatter map. * @param gather_rows Number of rows in the output map. * @param stream Stream used for CUDA kernel calls. * @return Output gather map. */ template <typename MapIterator> auto scatter_to_gather(MapIterator scatter_map_begin, MapIterator scatter_map_end, size_type gather_rows, rmm::cuda_stream_view stream) { using MapValueType = typename thrust::iterator_traits<MapIterator>::value_type; // The gather_map is initialized with `numeric_limits::lowest()` value to identify pass-through // entries when calling the gather_bitmask() which applies a pass-through whenever it finds a // value outside the range of the target column. 
// We'll use the `numeric_limits::lowest()` value for this since it should always be outside the // valid range. auto gather_map = rmm::device_uvector<size_type>(gather_rows, stream); thrust::uninitialized_fill(rmm::exec_policy(stream), gather_map.begin(), gather_map.end(), std::numeric_limits<size_type>::lowest()); // Convert scatter map to a gather map thrust::scatter( rmm::exec_policy(stream), thrust::make_counting_iterator<MapValueType>(0), thrust::make_counting_iterator<MapValueType>(std::distance(scatter_map_begin, scatter_map_end)), scatter_map_begin, gather_map.begin()); return gather_map; } /** * @brief Create a complement map of `scatter_to_gather` map * * The purpose of this map is to create an identity-mapping for the rows that are not * touched by the `scatter_map`. * * The output result of this mapping is firstly initialized as an identity-mapping * (i.e., `output[i] = i`). Then, for each value `idx` from `scatter_map`, the value `output[idx]` * is set to `numeric_limits::lowest()`, which is an invalid, out-of-bound index to identify the * pass-through entries when calling the `gather_bitmask()` function. * */ template <typename MapIterator> auto scatter_to_gather_complement(MapIterator scatter_map_begin, MapIterator scatter_map_end, size_type gather_rows, rmm::cuda_stream_view stream) { auto gather_map = rmm::device_uvector<size_type>(gather_rows, stream); thrust::sequence(rmm::exec_policy(stream), gather_map.begin(), gather_map.end(), 0); auto const out_of_bounds_begin = thrust::make_constant_iterator(std::numeric_limits<size_type>::lowest()); auto const out_of_bounds_end = out_of_bounds_begin + thrust::distance(scatter_map_begin, scatter_map_end); thrust::scatter(rmm::exec_policy(stream), out_of_bounds_begin, out_of_bounds_end, scatter_map_begin, gather_map.begin()); return gather_map; } template <typename Element, typename Enable = void> struct column_scatterer_impl { template <typename... Args> std::unique_ptr<column> operator()(Args&&...) 
const { CUDF_FAIL("Unsupported type for scatter."); } }; template <typename Element> struct column_scatterer_impl<Element, std::enable_if_t<cudf::is_fixed_width<Element>()>> { template <typename MapIterator> std::unique_ptr<column> operator()(column_view const& source, MapIterator scatter_map_begin, MapIterator scatter_map_end, column_view const& target, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto result = std::make_unique<column>(target, stream, mr); auto result_view = result->mutable_view(); // NOTE use source.begin + scatter rows rather than source.end in case the // scatter map is smaller than the number of source rows thrust::scatter(rmm::exec_policy(stream), source.begin<Element>(), source.begin<Element>() + cudf::distance(scatter_map_begin, scatter_map_end), scatter_map_begin, result_view.begin<Element>()); return result; } }; template <> struct column_scatterer_impl<string_view> { template <typename MapIterator> std::unique_ptr<column> operator()(column_view const& source, MapIterator scatter_map_begin, MapIterator scatter_map_end, column_view const& target, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto d_column = column_device_view::create(source, stream); auto const begin = d_column->begin<string_view>(); auto const end = begin + cudf::distance(scatter_map_begin, scatter_map_end); return strings::detail::scatter(begin, end, scatter_map_begin, target, stream, mr); } }; template <> struct column_scatterer_impl<list_view> { template <typename MapIterator> std::unique_ptr<column> operator()(column_view const& source, MapIterator scatter_map_begin, MapIterator scatter_map_end, column_view const& target, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { return cudf::lists::detail::scatter( source, scatter_map_begin, scatter_map_end, target, stream, mr); } }; template <> struct column_scatterer_impl<dictionary32> { template <typename MapIterator> 
std::unique_ptr<column> operator()(column_view const& source_in, MapIterator scatter_map_begin, MapIterator scatter_map_end, column_view const& target_in, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { if (target_in.is_empty()) // empty begets empty return make_empty_column(data_type{type_id::DICTIONARY32}); if (source_in.is_empty()) // no input, just make a copy return std::make_unique<column>(target_in, stream, mr); // check the keys match dictionary_column_view const source(source_in); dictionary_column_view const target(target_in); CUDF_EXPECTS(source.keys().type() == target.keys().type(), "scatter dictionary keys must be the same type"); // first combine keys so both dictionaries have the same set auto target_matched = dictionary::detail::add_keys(target, source.keys(), stream, mr); auto const target_view = dictionary_column_view(target_matched->view()); auto source_matched = dictionary::detail::set_keys(source, target_view.keys(), stream); auto const source_view = dictionary_column_view(source_matched->view()); // now build the new indices by doing a scatter on just the matched indices auto source_itr = indexalator_factory::make_input_iterator(source_view.indices()); auto new_indices = std::make_unique<column>(target_view.get_indices_annotated(), stream, mr); auto target_itr = indexalator_factory::make_output_iterator(new_indices->mutable_view()); thrust::scatter(rmm::exec_policy(stream), source_itr, source_itr + std::distance(scatter_map_begin, scatter_map_end), scatter_map_begin, target_itr); // record some data before calling release() auto const indices_type = new_indices->type(); auto const output_size = new_indices->size(); auto const null_count = new_indices->null_count(); auto contents = new_indices->release(); auto indices_column = std::make_unique<column>(indices_type, static_cast<size_type>(output_size), std::move(*(contents.data.release())), rmm::device_buffer{0, stream, mr}, 0); // take the keys from the matched column 
allocated using mr std::unique_ptr<column> keys_column(std::move(target_matched->release().children.back())); // create column with keys_column and indices_column return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(contents.null_mask.release())), null_count); } }; struct column_scatterer { template <typename Element, typename MapIterator> std::unique_ptr<column> operator()(column_view const& source, MapIterator scatter_map_begin, MapIterator scatter_map_end, column_view const& target, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { column_scatterer_impl<Element> scatterer{}; return scatterer(source, scatter_map_begin, scatter_map_end, target, stream, mr); } }; template <> struct column_scatterer_impl<struct_view> { template <typename MapItRoot> std::unique_ptr<column> operator()(column_view const& source, MapItRoot scatter_map_begin, MapItRoot scatter_map_end, column_view const& target, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { CUDF_EXPECTS(source.num_children() == target.num_children(), "Scatter source and target are not of the same type."); auto const scatter_map_size = std::distance(scatter_map_begin, scatter_map_end); if (scatter_map_size == 0) { return std::make_unique<column>(target, stream, mr); } structs_column_view const structs_src(source); structs_column_view const structs_target(target); std::vector<std::unique_ptr<column>> output_struct_members(structs_src.num_children()); std::transform(structs_src.child_begin(), structs_src.child_end(), structs_target.child_begin(), output_struct_members.begin(), [&scatter_map_begin, &scatter_map_end, stream, mr](auto const& source_col, auto const& target_col) { return type_dispatcher<dispatch_storage_type>(source_col.type(), column_scatterer{}, source_col, scatter_map_begin, scatter_map_end, target_col, stream, mr); }); // We still need to call `gather_bitmask` even when the source's children are not nullable, // as 
if the target's children have null_masks, those null_masks need to be updated after // being scattered onto. auto const child_nullable = std::any_of(structs_src.child_begin(), structs_src.child_end(), [](auto const& col) { return col.nullable(); }) or std::any_of(structs_target.child_begin(), structs_target.child_end(), [](auto const& col) { return col.nullable(); }); if (child_nullable) { auto const gather_map = scatter_to_gather(scatter_map_begin, scatter_map_end, target.size(), stream); gather_bitmask(cudf::table_view{std::vector<cudf::column_view>{structs_src.child_begin(), structs_src.child_end()}}, gather_map.begin(), output_struct_members, gather_bitmask_op::PASSTHROUGH, stream, mr); } // Need to put the result column in a vector to call `gather_bitmask`. std::vector<std::unique_ptr<column>> result; result.emplace_back(cudf::make_structs_column(target.size(), std::move(output_struct_members), 0, rmm::device_buffer{0, stream, mr}, stream, mr)); // Only gather bitmask from the target column for the rows that have not been scattered onto // The bitmask from the source column will be gathered at the top level `scatter()` call. if (target.nullable()) { auto const gather_map = scatter_to_gather_complement(scatter_map_begin, scatter_map_end, target.size(), stream); gather_bitmask(table_view{std::vector<cudf::column_view>{target}}, gather_map.begin(), result, gather_bitmask_op::PASSTHROUGH, stream, mr); } return std::move(result.front()); } }; /** * @brief Scatters the rows of the source table into a copy of the target table * according to a scatter map. * * Scatters values from the source table into the target table out-of-place, * returning a "destination table". The scatter is performed according to a * scatter map such that row `scatter_begin[i]` of the destination table gets row * `i` of the source table. All other rows of the destination table equal * corresponding rows of the target table. 
 *
 * The number of columns in source must match the number of columns in target
 * and their corresponding datatypes must be the same.
 *
 * If the same index appears more than once in the scatter map, the result is
 * undefined. The map may contain negative values, which are converted to
 * positive row positions by adding target.num_rows().
 *
 * @throws cudf::logic_error if scatter map index is out of bounds
 * @throws cudf::logic_error if scatter_map.size() > source.num_rows()
 *
 * @param[in] source The input columns containing values to be scattered into the
 * target columns
 * @param[in] scatter_map_begin Beginning of iterator range of integer indices mapping
 * source rows to rows in the target columns
 * @param[in] scatter_map_end End of iterator range of integer indices
 * @param[in] target The set of columns into which values from the source_table
 * are to be scattered
 * @param[in] check_bounds Optionally perform bounds checking on the values of
 * `scatter_map` and throw an error if any of its values are out of bounds.
 * @param[in] stream CUDA stream used for device memory operations and kernel launches.
 * @param[in] mr Device memory resource used to allocate the returned table's device memory
 *
 * @return Result of scattering values from source to target
 */
template <typename MapIterator>
std::unique_ptr<table> scatter(
  table_view const& source,
  MapIterator scatter_map_begin,
  MapIterator scatter_map_end,
  table_view const& target,
  bool check_bounds                   = false,
  rmm::cuda_stream_view stream        = rmm::cuda_stream_default,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  CUDF_FUNC_RANGE();

  using MapType = typename thrust::iterator_traits<MapIterator>::value_type;

  if (check_bounds) {
    // Valid indices are in [-num_rows, num_rows); count how many map entries
    // fall inside that window and require the count to equal the map size.
    auto const begin = -target.num_rows();
    auto const end   = target.num_rows();
    auto bounds      = bounds_checker<MapType>{begin, end};
    CUDF_EXPECTS(
      std::distance(scatter_map_begin, scatter_map_end) ==
        thrust::count_if(rmm::exec_policy(stream), scatter_map_begin, scatter_map_end, bounds),
      "Scatter map index out of bounds");
  }

  CUDF_EXPECTS(std::distance(scatter_map_begin, scatter_map_end) <= source.num_rows(),
               "scatter map size should be <= to number of rows in source");

  // Transform negative indices to index + target size.
  auto updated_scatter_map_begin =
    thrust::make_transform_iterator(scatter_map_begin, index_converter<MapType>{target.num_rows()});
  auto updated_scatter_map_end =
    thrust::make_transform_iterator(scatter_map_end, index_converter<MapType>{target.num_rows()});
  auto result = std::vector<std::unique_ptr<column>>(target.num_columns());

  // Scatter each source column into a copy of the corresponding target column,
  // dispatching on the column's storage type.
  std::transform(source.begin(),
                 source.end(),
                 target.begin(),
                 result.begin(),
                 [=](auto const& source_col, auto const& target_col) {
                   return type_dispatcher<dispatch_storage_type>(source_col.type(),
                                                                 column_scatterer{},
                                                                 source_col,
                                                                 updated_scatter_map_begin,
                                                                 updated_scatter_map_end,
                                                                 target_col,
                                                                 stream,
                                                                 mr);
                 });

  // We still need to call `gather_bitmask` even when the source columns are not nullable,
  // as if the target has null_mask, that null_mask needs to be updated after scattering.
  auto const nullable =
    std::any_of(source.begin(), source.end(), [](auto const& col) { return col.nullable(); }) or
    std::any_of(target.begin(), target.end(), [](auto const& col) { return col.nullable(); });
  if (nullable) {
    // Invert the scatter map into a gather map so the bitmask update can be
    // expressed as a gather over the result columns.
    auto const gather_map = scatter_to_gather(
      updated_scatter_map_begin, updated_scatter_map_end, target.num_rows(), stream);
    gather_bitmask(source, gather_map.begin(), result, gather_bitmask_op::PASSTHROUGH, stream, mr);

    // For struct columns, we need to superimpose the null_mask of the parent over the null_mask of
    // the children.
    std::for_each(result.begin(), result.end(), [=](auto& col) {
      auto const col_view = col->view();
      if (col_view.type().id() == type_id::STRUCT and col_view.nullable()) {
        auto const num_rows   = col_view.size();
        auto const null_count = col_view.null_count();
        auto contents         = col->release();

        // Children null_mask will be superimposed during structs column construction.
        col = cudf::make_structs_column(num_rows,
                                        std::move(contents.children),
                                        null_count,
                                        std::move(*contents.null_mask),
                                        stream,
                                        mr);
      }
    });
  }
  return std::make_unique<table>(std::move(result));
}

}  // namespace detail
}  // namespace cudf
the_stack
namespace fastertransformer {

// Decoder self-attention for one generation step.
//
// Launch layout (see self_attention_dispatch): grid = batch_size * head_num blocks,
// one block per (batch, head) pair; blockDim.x >= max(size_per_head, step).
// Dynamic shared memory: sizeof(T) * (size_per_head + step) bytes, split into
//   sq[size_per_head]  — the biased query vector for this (batch, head)
//   logits[step]       — per-step attention scores, then softmax weights
// Side effects: for ite == step - 1, writes K + bias_K into key_cache and
// V + bias_V into value_cache, so the caches stay current for the next step.
// Positions tid < (start_len - memory_sequence_length[bid]) are masked out of
// the softmax (score forced to -1e20f before the max-reduction).
template <typename T>
__global__ void self_attention_kernel(const int* memory_sequence_length,
                                      T* key_buf, T* value_buf, T* query_buf,
                                      const T* self_Q_bias, T* key_cache, const T* self_K_bias,
                                      T* value_cache, const T* self_V_bias, T* context_buf,
                                      int batch_size, int head_num, int size_per_head,
                                      const int step, const int start_len, const T scalar) {
  extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
  T* sq = reinterpret_cast<T*>(s_buf);
  T* logits = reinterpret_cast<T*>(&sq[size_per_head]);

  int tid = threadIdx.x;
  int bid = blockIdx.x / head_num;       // batch index
  int head_id = blockIdx.x % head_num;   // head index

  // Flat index of element `tid` of this (batch, head) vector in a
  // [batch, head, size_per_head] buffer, and the matching bias index.
  int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
  int qkv_bias_id = head_id * size_per_head + tid;

  // Stage the biased query in shared memory; only the first size_per_head lanes carry data.
  if (tid < size_per_head) sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
  __syncthreads();

  // offset for each step
  int offset = batch_size * head_num * size_per_head;

  // Compute the raw dot-product score q·k * scalar for every past step.
  for (int ite = 0; ite < step; ++ite) {
    T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
    // for the last step, we should update K + bias_K to the cache
    if (ite == step - 1 && tid < size_per_head) {
      key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
      key_cache[ite * offset + qkv_id] = key;
    }
    T val = (tid < size_per_head) ? key * sq[tid] * (T)(scalar) : (T)(0.0f);
    T qk = blockReduceSum(val);
    if (threadIdx.x == 0) {
      logits[ite] = qk;
    }
    __syncthreads();  // try to remove
  }
  __syncthreads();  // try to remove

  // Numerically-stable softmax over logits[0..step), accumulated in float.
  // Lane tid handles position tid; positions before the valid window
  // (tid < start_len - memory_sequence_length[bid]) are masked out.
  __shared__ float s_max_val, s_sum;
  float local_i = (tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
                      ? (float)logits[tid]
                      : -1e20f;
  float max_val = blockReduceMax<float>(local_i);
  if (tid == 0) s_max_val = max_val;
  __syncthreads();

  local_i -= s_max_val;
  float local_o = (tid >= (start_len - memory_sequence_length[bid]) && (tid < step))
                      ? __expf(local_i)
                      : 0.0f;
  float val = blockReduceSum<float>(local_o);
  if (tid == 0) s_sum = val;  // + 1e-6;
  // NOTE(review): s_sum can be 0 if every position is masked — the commented-out
  // "+ 1e-6" epsilon would guard the division below; confirm callers never hit this.
  __syncthreads();

  if (tid >= (start_len - memory_sequence_length[bid]) && (tid < step)) {
    logits[tid] = local_o / s_sum;
  } else if (tid < step) {
    logits[tid] = static_cast<T>(0.0f);
  }
  __syncthreads();

  // Weighted sum of cached values: context = sum_ite softmax[ite] * V[ite].
  if (tid < size_per_head) {
    T sum = (T)0.0f;
    for (int ite = 0; ite < step; ++ite) {
      T value = value_cache[ite * offset + qkv_id];
      // for the last step, we should update V + bias_V to the cache
      if (ite == step - 1) {
        value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
        value_cache[ite * offset + qkv_id] = value;
      }
      sum += value * logits[ite];
    }
    context_buf[qkv_id] = sum;
  }
}

// Host-side launcher for self_attention_kernel.
// Picks a block size >= max(size_per_head, step) (capped at 1024) and sizes
// dynamic shared memory as sizeof(T) * (size_per_head + step).
// The optimized masked_attention_kernel_opt paths are currently disabled
// (ATTENION_OPT makes cond == 0, so only the default path runs).
template <typename T>
void self_attention_dispatch(const int* memory_sequence_length, T* key_buf, T* value_buf,
                             T* query_buf, const T* self_Q_bias, T* key_cache,
                             const T* self_K_bias, T* value_cache, const T* self_V_bias,
                             T* context_buf, int batch_size, int head_num, int size_per_head,
                             const int step, const int start_len, cudaStream_t stream) {
  const int block_sz = ATTENTION_BLOCK_SIZE;
  T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));  // 1/sqrt(d_k) attention scaling

  dim3 grid(batch_size * head_num);  // one block per (batch, head)

  int cond = size_per_head * ((ATTENION_OPT) ? 1 : 0);
  switch (cond) {
    /*case 32:
      masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
        key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias,
        value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar);
      break;
    case 64:
      if(sizeof(T) == 2)
        masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz, sizeof(float)*step, stream>>>(
          key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias,
          value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar);
      else
        masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
          key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias,
          value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar);
      break;
    case 128:
      if(sizeof(T) == 2)
        masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz, sizeof(float)*step, stream>>>(
          key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias,
          value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar);
      else
        masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
          key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias,
          value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar);
      break;*/
    default:
      // default path
      int block_size = 128;  // suppose size_per_head <= 128
      if (step <= 64)
        block_size = 64;
      else if (step <= 128 && step > size_per_head)
        block_size = 128;
      else if (step > 128 && step <= 256)
        block_size = 256;
      else if (step > 256 && step <= 512)
        block_size = 512;
      else
        block_size = 1024;

      // The softmax phase needs at least one lane per step AND one lane per
      // head-dim element, so enforce block_size >= size_per_head too.
      if ((int)block_size < size_per_head) {
        block_size = size_per_head;
      }
      assert(block_size <= 1024);
      dim3 block(block_size);

      // NOTE(review): this shadows the `scalar` declared above with an
      // identical value (1/sqrt(size_per_head)) — harmless but redundant.
      T scalar = 1 / sqrtf(size_per_head * 1.0f);

      // Shared memory: sq[size_per_head] + logits[step], both of type T.
      int shared_size = sizeof(T) * (size_per_head + step);
      self_attention_kernel<T><<<grid, block, shared_size, stream>>>(
          memory_sequence_length, key_buf, value_buf, query_buf, self_Q_bias, key_cache,
          self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num,
          size_per_head, step, start_len, scalar);
#ifndef NDEBUG
      // Debug builds: surface any async kernel failure immediately.
      cudaDeviceSynchronize();
      check_cuda_error(cudaGetLastError());
#endif
  }
}

// One decoder-layer self-attention pass for generation step `step`:
//   1. Project from_tensor into Q/K/V (fused batched GEMM when is_fuse_QKV,
//      otherwise three separate GEMMs writing Q into query_buf_ and K/V
//      directly into the step-th slot of the caches).
//   2. Run self_attention_dispatch to produce context_buf_.
//   3. Output projection GEMM into decoder_output.
// cuBLAS is column-major, hence the (n, m, k) argument order for row-major data.
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::self_multi_head_attention(
    const DataType_* from_tensor, const int* memory_sequence_length, DataType_* key_cache_,
    DataType_* value_cache_, DataType_* decoder_output, const int step, const int start_len) {
  int m = batch_size_;
  int n = hidden_units_;
  int k = hidden_units_;
  DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;

  if (is_fuse_QKV == true) {
    // Single batched GEMM computing Q, K and V (batch count 3) in one call.
    check_cuda_error(
        cublasGemmBatchedEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha,
                            (const void* const*)qkv_kernel_, AType_, n,
                            (const void* const*)qkv_input_, BType_, k, &beta,
                            (void* const*)qkv_buf_, CType_, n, 3, computeType_,
                            static_cast<cublasGemmAlgo_t>(cublasAlgo_[4])));
  } else {
    // Write K/V projections straight into this step's slot of the caches.
    key_buf_ = key_cache_ + (step - 1) * m * n;
    value_buf_ = value_cache_ + (step - 1) * m * n;

    check_cuda_error(
        cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha,
                     param_.self_attention.query_weight.kernel, AType_, n, from_tensor, BType_, k,
                     &beta, query_buf_, CType_, n, computeType_,
                     static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));

    check_cuda_error(
        cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha,
                     param_.self_attention.key_weight.kernel, AType_, n, from_tensor, BType_, k,
                     &beta, key_buf_, CType_, n, computeType_,
                     static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));

    check_cuda_error(
        cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha,
                     param_.self_attention.value_weight.kernel, AType_, n, from_tensor, BType_, k,
                     &beta, value_buf_, CType_, n, computeType_,
                     static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
  }

  // Attention proper: scores, softmax and context vector; also refreshes the caches.
  self_attention_dispatch<DataType_>(memory_sequence_length, key_buf_, value_buf_, query_buf_,
                                     param_.self_attention.query_weight.bias, key_cache_,
                                     param_.self_attention.key_weight.bias, value_cache_,
                                     param_.self_attention.value_weight.bias, context_buf_,
                                     batch_size_, head_num_, size_per_head_, step, start_len,
                                     param_.stream);

  // Output projection.
  check_cuda_error(
      cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha,
                   param_.self_attention.attention_output_weight.kernel, AType_, n, context_buf_,
                   BType_, k, &beta, decoder_output, CType_, n, computeType_,
                   static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}

// Layer-norm of `input` (m rows of n elements) into `output`.
// One block per row; block.x is shaped so warp shuffles work (multiple of 32)
// and halved for half precision (each thread handles a half2).
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm1(const DataType_* input, const DataType_* gamma,
                                                    const DataType_* beta, DataType_* output,
                                                    int m, int n) {
  dim3 grid(m);
  dim3 block(min(n, 1024));

  /* For general cases, n is equal to hidden_units, e.g., 512/1024.
     Since we have warp shuffle inside the code, block.x % 32 should be 0. */
  if (n % 32 != 0) block.x = 1024;

  block.x = block.x / (4 / sizeof(DataType_));  // if using half, only need half of block.x

  /* should pay attention to the rsqrt precision */
  // assert(block.x <= 1024);
  // decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
  // gamma, beta, output, m, n);
  decoder_norm1_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(
      input, gamma, beta, output, m, n);  // For gpt-3
}

// Fused residual-add + layer-norm: writes input+bias(+residual) into `output`
// and its layer-norm into `norm_output`. Same launch-shape rules as decoder_norm1.
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::decoder_norm2(const DataType_* input, const DataType_* gamma,
                                                    const DataType_* beta, const DataType_* bias,
                                                    DataType_* output, DataType_* norm_output,
                                                    int m, int n) {
  dim3 grid(m);
  dim3 block(min(n, 1024));

  /* For general cases, n is equal to hidden_units, e.g., 512/1024.
     Since we have warp shuffle inside the code, block.x % 32 should be 0. */
  if (n % 32 != 0) block.x = 1024;

  block.x = block.x / (4 / sizeof(DataType_));  // if using half, only need half of block.x

  /* should pay attention to the rsqrt precision */
  // assert(block.x <= 1024);
  // decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input,
  // gamma, beta, bias, output, norm_output, m, n);
  decoder_norm2_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(
      input, gamma, beta, bias, output, norm_output, m, n);  // For gpt-3
}

// Position-wise feed-forward network:
//   GEMM (n -> inner_size) -> bias + activation (RELU or GELU) -> GEMM (inner_size -> n).
// The activation kernels process 4-element (8 for half) vectors per thread,
// hence the n1 / (4 / sizeof(DataType_)) width passed to them.
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::ffn(const DataType_* input, DataType_* ffn_inner,
                                          DataType_* output, const int m, const int inner_size,
                                          const int n, ActivationType activation_type) {
  int m1 = m, k1 = n, n1 = inner_size;
  DataType_ alpha = (DataType_)1.0f;
  DataType_ beta = (DataType_)0.0f;

  check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n1, m1, k1, &alpha,
                                param_.ffn.intermediate_weight.kernel, AType_, n1, input, BType_,
                                k1, &beta, ffn_inner, CType_, n1, computeType_,
                                static_cast<cublasGemmAlgo_t>(cublasAlgo_[2])));

  // dim3 grid(min(m1, 65536));
  // dim3 block(min(n1 / 4, 1024));
  // // TODO remove this limitation
  // // assert(block.x <= 1024);
  // if(activation_type == ActivationType::RELU)
  //   add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner,
  //     param_.ffn.intermediate_weight.bias, m1, n1);
  // else if(activation_type == ActivationType::GELU)
  //   add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner,
  //     param_.ffn.intermediate_weight.bias, m1, n1);

  dim3 block(min((int)(n1 / 4 / (4 / sizeof(DataType_))), 1024));
  dim3 grid(min(m1 * n1 / block.x, 65536));

  if (activation_type == ActivationType::RELU)
    add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(
        ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));
  else if (activation_type == ActivationType::GELU)
    add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(
        ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));

  int m2 = m, n2 = n, k2 = inner_size;
  check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n2, m2, k2, &alpha,
                                param_.ffn.output_weight.kernel, AType_, n2, ffn_inner, BType_, k2,
                                &beta, output, CType_, n2, computeType_,
                                static_cast<cublasGemmAlgo_t>(cublasAlgo_[3])));
}

// In-place bias add followed by the chosen activation over an m x n buffer.
// Same vectorized launch shaping as in ffn() above.
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_act(
    DataType_* input, const DataType_* bias, int m, int n, cudaStream_t stream,
    ActivationType activation_type = ActivationType::GELU) {
  dim3 block_(min((int)(n / 4 / (4 / sizeof(DataType_))), 1024));
  dim3 grid_(min(m * n / block_.x, 65536));

  if (activation_type == ActivationType::RELU)
    add_bias_relu<DataType_><<<grid_, block_, 0, stream>>>(
        input, bias, m, n / (4 / sizeof(DataType_)));
  else if (activation_type == ActivationType::GELU)
    add_bias_gelu<DataType_><<<grid_, block_, 0, stream>>>(
        input, bias, m, n / (4 / sizeof(DataType_)));
}

// output += input + ffn output bias, elementwise over an m x n buffer
// (residual connection after the FFN output projection).
template <OperationType OpType_>
void OpenTransformerDecoder<OpType_>::add_bias_input(DataType_* output, const DataType_* input,
                                                     const int m, const int n) {
  dim3 grid(min(m, 65536));
  dim3 block(min(n, 1024));
  add_bias_input_kernel_generalize<<<grid, block, 0, param_.stream>>>(
      output, input, param_.ffn.output_weight.bias, m, n);
}

// Explicit template instantiations for FP32 and FP16.
// (NOTE(review): "otuput" below is a typo in the original parameter name of the
// instantiation declarations; parameter names are not part of the signature,
// so it is kept as-is.)
template void OpenTransformerDecoder<OperationType::FP32>::self_multi_head_attention(
    const float* from_tensor, const int* memory_sequence_length, float* key_cache,
    float* value_cache, float* decoder_output, const int step, const int start_len);

template void OpenTransformerDecoder<OperationType::FP16>::self_multi_head_attention(
    const half* from_tensor, const int* memory_sequence_length, half* key_cache,
    half* value_cache, half* decoder_output, const int step, const int start_len);

template void OpenTransformerDecoder<OperationType::FP32>::ffn(
    const float* input, float* ffn_inner, float* otuput, const int m, const int inner_size,
    const int n, ActivationType activation_type);

template void OpenTransformerDecoder<OperationType::FP16>::ffn(
    const half* input, half* ffn_inner, half* otuput, const int m, const int inner_size,
    const int n, ActivationType activation_type);

template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm1(
    const float* input, const float* gamma, const float* beta, float* output, int m, int n);

template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm1(
    const half* input, const half* gamma, const half* beta, half* output, int m, int n);

template void OpenTransformerDecoder<OperationType::FP32>::decoder_norm2(
    const float* input, const float* gamma, const float* beta, const float* bias, float* output,
    float* norm_output, int m, int n);

template void OpenTransformerDecoder<OperationType::FP16>::decoder_norm2(
    const half* input, const half* gamma, const half* beta, const half* bias, half* output,
    half* norm_output, int m, int n);

template void OpenTransformerDecoder<OperationType::FP32>::add_bias_act(
    float* input, const float* bias, int m, int n, cudaStream_t stream,
    ActivationType activation_type);

template void OpenTransformerDecoder<OperationType::FP16>::add_bias_act(
    half* input, const half* bias, int m, int n, cudaStream_t stream,
    ActivationType activation_type);

template void OpenTransformerDecoder<OperationType::FP32>::add_bias_input(
    float* output, const float* input, const int m, const int n);

template void OpenTransformerDecoder<OperationType::FP16>::add_bias_input(
    half* output, const half* input, const int m, const int n);

}  // namespace FasterTransformer
the_stack
extern "C" {
#include "sph/sph_blake.h"
#include "sph/sph_bmw.h"
#include "sph/sph_groestl.h"
#include "sph/sph_skein.h"
#include "sph/sph_jh.h"
#include "sph/sph_keccak.h"
}
#include "miner.h"
#include "cuda_helper.h"

// Per-GPU hash working buffer (16 uint32 words per candidate nonce).
static uint32_t *d_hash[MAX_GPUS];

// Buffers holding the nonce vectors for the conditional hash branches
// (branch1/branch2 split on hash bit 0x8, branch3 carries all survivors).
uint32_t *d_branch1Nonces[MAX_GPUS];
uint32_t *d_branch2Nonces[MAX_GPUS];
uint32_t *d_branch3Nonces[MAX_GPUS];

extern void quark_blake512_cpu_init(int thr_id);
extern void quark_blake512_cpu_setBlock_80(uint64_t *pdata);
extern void quark_blake512_cpu_setBlock_80_multi(uint32_t thr_id, uint64_t *pdata);
extern void quark_blake512_cpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *d_hash);
extern void quark_blake512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);

extern void quark_bmw512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);
extern void quark_bmw512_cpu_hash_64_quark(uint32_t threads, uint32_t startNounce, uint32_t *d_hash);

extern void quark_groestl512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);

extern void quark_skein512_cpu_init(int thr_id);
extern void quark_skein512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);

extern void quark_keccakskein512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);
extern void quark_keccak512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, uint32_t target, uint32_t *h_found);
extern void quark_keccak512_cpu_init(int thr_id);

extern void quark_jh512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash);
extern void quark_jh512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, uint32_t target, uint32_t *h_found);
extern void quark_jh512_cpu_init(int thr_id);

extern void quark_compactTest_cpu_init(int thr_id, uint32_t threads);
extern void quark_compactTest_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable,
	uint32_t *d_nonces1, uint32_t *nrm1, uint32_t *d_nonces2, uint32_t *nrm2);
extern void quark_compactTest_single_false_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *inpHashes, uint32_t *d_validNonceTable,
	uint32_t *d_nonces1, uint32_t *nrm1);

extern uint32_t cuda_check_hash_branch(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_inputHash);
extern void cuda_check_quarkcoin(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_inputHash, uint32_t *foundnonces);

// Original quarkhash reference function taken from a miner source.
// CPU implementation of the Quark chained hash: blake -> bmw ->
// (groestl | skein) -> groestl -> jh -> (blake | bmw) -> keccak -> skein ->
// (keccak | jh), each conditional branch chosen by bit 0x8 of the current
// hash's first byte. Writes the first 32 bytes of the final digest to state.
extern "C" void quarkhash(void *state, const void *input)
{
	sph_blake512_context ctx_blake;
	sph_bmw512_context ctx_bmw;
	sph_groestl512_context ctx_groestl;
	sph_jh512_context ctx_jh;
	sph_keccak512_context ctx_keccak;
	sph_skein512_context ctx_skein;

	unsigned char hash[64];

	sph_blake512_init(&ctx_blake);
	sph_blake512 (&ctx_blake, input, 80);
	sph_blake512_close(&ctx_blake, (void*) hash);

	sph_bmw512_init(&ctx_bmw);
	sph_bmw512 (&ctx_bmw, (const void*) hash, 64);
	sph_bmw512_close(&ctx_bmw, (void*) hash);

	if (hash[0] & 0x8)
	{
		sph_groestl512_init(&ctx_groestl);
		sph_groestl512 (&ctx_groestl, (const void*) hash, 64);
		sph_groestl512_close(&ctx_groestl, (void*) hash);
	}
	else
	{
		sph_skein512_init(&ctx_skein);
		sph_skein512 (&ctx_skein, (const void*) hash, 64);
		sph_skein512_close(&ctx_skein, (void*) hash);
	}

	sph_groestl512_init(&ctx_groestl);
	sph_groestl512 (&ctx_groestl, (const void*) hash, 64);
	sph_groestl512_close(&ctx_groestl, (void*) hash);

	sph_jh512_init(&ctx_jh);
	sph_jh512 (&ctx_jh, (const void*) hash, 64);
	sph_jh512_close(&ctx_jh, (void*) hash);

	if (hash[0] & 0x8)
	{
		sph_blake512_init(&ctx_blake);
		sph_blake512 (&ctx_blake, (const void*) hash, 64);
		sph_blake512_close(&ctx_blake, (void*) hash);
	}
	else
	{
		sph_bmw512_init(&ctx_bmw);
		sph_bmw512 (&ctx_bmw, (const void*) hash, 64);
		sph_bmw512_close(&ctx_bmw, (void*) hash);
	}

	sph_keccak512_init(&ctx_keccak);
	sph_keccak512 (&ctx_keccak, (const void*) hash, 64);
	sph_keccak512_close(&ctx_keccak, (void*) hash);

	sph_skein512_init(&ctx_skein);
	sph_skein512 (&ctx_skein, (const void*) hash, 64);
	sph_skein512_close(&ctx_skein, (void*) hash);

	if (hash[0] & 0x8)
	{
		sph_keccak512_init(&ctx_keccak);
		sph_keccak512 (&ctx_keccak, (const void*) hash, 64);
		sph_keccak512_close(&ctx_keccak, (void*) hash);
	}
	else
	{
		sph_jh512_init(&ctx_jh);
		sph_jh512 (&ctx_jh, (const void*) hash, 64);
		sph_jh512_close(&ctx_jh, (void*) hash);
	}

	memcpy(state, hash, 32);
}

static bool init[MAX_GPUS] = { 0 };
static uint32_t endiandata[MAX_GPUS][20];
// Candidate nonces reported by the two final GPU kernels (jh-final and
// keccak-final); slot [1] is an optional second hit, 0xffffffff = none.
static uint32_t foundnonces[MAX_GPUS][2];
static uint32_t foundnonces2[MAX_GPUS][2];

// GPU scan loop for Quark. Runs the chained-hash pipeline on [pdata[19],
// max_nonce) in `throughput`-sized batches, CPU-verifies any GPU hit with
// quarkhash()/fulltest(), and returns the number of valid nonces found
// (nonce in pdata[19], optional second in pdata[21]); 0 if the range was
// exhausted or the scan was aborted/restarted.
extern "C" int scanhash_quark(int thr_id, uint32_t *pdata, uint32_t *ptarget,
	uint32_t max_nonce, unsigned long *hashes_done)
{
	const uint32_t first_nonce = pdata[19];
	uint32_t intensity = 256*256*57;
	intensity = intensity + ((1 << 22));
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props, device_map[thr_id]);
	// Larger batches on newer / bigger GPUs.
	if (device_sm[device_map[thr_id]] > 500) intensity= 1 << 24;
	if (strstr(props.name, "980 Ti"))
	{
		intensity = 1 << 25;
	}
	else if (strstr(props.name, "980"))
	{
		intensity = 1 << 25;
	}
	uint32_t throughput = device_intensity(device_map[thr_id], __func__, intensity); // 256*4096
	throughput = min(throughput, max_nonce - first_nonce);

	if (opt_benchmark)
		((uint32_t*)ptarget)[7] =0x2f;

	if (!init[thr_id])
	{
		// One-time per-GPU setup: copy constants, allocate device memory.
		CUDA_SAFE_CALL(cudaSetDevice(device_map[thr_id]));
		if (!opt_cpumining) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
		if (opt_n_gputhreads == 1)
		{
			cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
		}
		CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], 16 * sizeof(uint32_t) * throughput));

		quark_compactTest_cpu_init(thr_id, throughput);
		// Branch buffers are sized to the expected survivor fraction (~70%).
		uint32_t noncebuffersize = throughput * 7 / 10;
		uint32_t noncebuffersize2 = (throughput * 7 / 10)*7/10;
		cudaMalloc(&d_branch1Nonces[thr_id], sizeof(uint32_t)*noncebuffersize2);
		cudaMalloc(&d_branch2Nonces[thr_id], sizeof(uint32_t)*noncebuffersize2);
		cudaMalloc(&d_branch3Nonces[thr_id], sizeof(uint32_t)*noncebuffersize);

		quark_blake512_cpu_init(thr_id);
		quark_keccak512_cpu_init(thr_id);
		quark_jh512_cpu_init(thr_id);
		CUDA_SAFE_CALL(cudaGetLastError());
		init[thr_id] = true;
	}

	for (int k=0; k < 20; k++)
		be32enc(&endiandata[thr_id][k], ((uint32_t*)pdata)[k]);

	cuda_check_cpu_setTarget(ptarget);
	if (opt_n_gputhreads > 1)
	{
		quark_blake512_cpu_setBlock_80_multi(thr_id, (uint64_t *)endiandata[thr_id]);
	}
	else
	{
		quark_blake512_cpu_setBlock_80((uint64_t *)endiandata[thr_id]);
	}
	do {
		uint32_t nrm1 = 0, nrm2 = 0, nrm3 = 0;

		quark_blake512_cpu_hash_80( throughput, pdata[19], d_hash[thr_id]);
		quark_bmw512_cpu_hash_64_quark(throughput, pdata[19],d_hash[thr_id]);
		quark_compactTest_single_false_cpu_hash_64(thr_id, throughput, pdata[19], d_hash[thr_id], NULL,
			d_branch3Nonces[thr_id], &nrm3);

		// Follow only the Skein branch.
		quark_skein512_cpu_hash_64(nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id]);

		// Unconditional branch for Groestl512.
		quark_groestl512_cpu_hash_64(nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id]);

		// Unconditional branch for JH512.
		quark_jh512_cpu_hash_64(nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id]);

		// Split the nonces into branch1 and branch2 according to if (hash[0] & 0x8).
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch2Nonces[thr_id], &nrm2);

		// Conditional branch for Blake512.
		quark_blake512_cpu_hash_64(nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id]);
		// Conditional branch for Bmw512.
		quark_bmw512_cpu_hash_64(nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id]);

		quark_keccakskein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id]);

		// Split the nonces into branch1 and branch2 according to if (hash[0] & 0x8).
		// NOTE(review): d_branch3Nonces is deliberately reused as the second output
		// here, overwriting its own input in place.
		quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
			d_branch1Nonces[thr_id], &nrm1,
			d_branch3Nonces[thr_id], &nrm2);

		quark_keccak512_cpu_hash_64_final(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], ptarget[7], &foundnonces2[thr_id][0]);
		quark_jh512_cpu_hash_64_final(thr_id, nrm2, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], ptarget[7], &foundnonces[thr_id][0]);

		if (foundnonces[thr_id][0] != 0xffffffff)
		{
			const uint32_t Htarg = ptarget[7];
			uint32_t vhash64[8];
			// CPU-verify the GPU candidate before reporting it.
			be32enc(&endiandata[thr_id][19], foundnonces[thr_id][0]);
			quarkhash(vhash64, endiandata[thr_id]);
			if (vhash64[7] <= Htarg && fulltest(vhash64, ptarget))
			{
				int res = 1;
				*hashes_done = pdata[19] - first_nonce + throughput;
				// check if there was some other ones...
				if (foundnonces2[thr_id][0] != 0xffffffff)
				{
					const uint32_t Htarg = ptarget[7];
					uint32_t vhash64[8];
					be32enc(&endiandata[thr_id][19], foundnonces2[thr_id][0]);
					quarkhash(vhash64, endiandata[thr_id]);
					if (vhash64[7] <= Htarg && fulltest(vhash64, ptarget))
					{
						pdata[21] = foundnonces2[thr_id][0];
						res++;
						if (opt_benchmark)
							applog(LOG_INFO, "GPU #%d: Found second nonce $%08X", thr_id, foundnonces2[thr_id][0]);
					}
					else
					{
						if (vhash64[7] != Htarg) // don't show message if it is equal but fails fulltest
							applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU!", thr_id, foundnonces2[thr_id][0]);
					}
				}
				else if (foundnonces[thr_id][1] != 0xffffffff)
				{
					pdata[21] = foundnonces[thr_id][1];
					res++;
					if (opt_benchmark)
						applog(LOG_INFO, "GPU #%d: Found second nonce $%08X", thr_id, foundnonces[thr_id][1]);
				}
				if (opt_benchmark)
					applog(LOG_INFO, "GPU #%d: Found nonce $%08X", thr_id, foundnonces[thr_id][0]);
				pdata[19] = foundnonces[thr_id][0];
				return res;
			}
			else
			{
				if (vhash64[7] != Htarg) // don't show message if it is equal but fails fulltest
					applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU!", thr_id, foundnonces[thr_id][0]);
			}
		}
		// Same validation path for hits reported by the keccak-final kernel.
		if (foundnonces2[thr_id][0] != 0xffffffff)
		{
			const uint32_t Htarg = ptarget[7];
			uint32_t vhash64[8];
			be32enc(&endiandata[thr_id][19], foundnonces2[thr_id][0]);
			quarkhash(vhash64, endiandata[thr_id]);
			if (vhash64[7] <= Htarg && fulltest(vhash64, ptarget))
			{
				int res = 1;
				*hashes_done = pdata[19] - first_nonce + throughput;
				// check if there was some other ones...
				if (foundnonces2[thr_id][1] != 0xffffffff)
				{
					pdata[21] = foundnonces2[thr_id][1];
					res++;
					if (opt_benchmark)
						applog(LOG_INFO, "GPU #%d: Found second nonce $%08X", thr_id, foundnonces2[thr_id][1]);
				}
				if (opt_benchmark)
					applog(LOG_INFO, "GPU #%d: Found nonce $%08X", thr_id, foundnonces2[thr_id][0]);
				pdata[19] = foundnonces2[thr_id][0];
				return res;
			}
			else
			{
				if (vhash64[7] != Htarg) // don't show message if it is equal but fails fulltest
					applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU!", thr_id, foundnonces2[thr_id][0]);
			}
		}
		pdata[19] += throughput;
	} while (!scan_abort_flag && !work_restart[thr_id].restart && ((uint64_t)max_nonce > ((uint64_t)(pdata[19]) + (uint64_t)throughput)));

	*hashes_done = pdata[19] - first_nonce;
	return 0;
}
the_stack
#include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <ATen/cuda/ApplyGridUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> #include <c10/cuda/CUDAMathCompat.h> #include <c10/core/Scalar.h> namespace at { namespace native { // ----------------------------------- // glu forward // ----------------------------------- void glu_kernel(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "glu_cuda", [&]() { using acc_t = at::acc_type<scalar_t, true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t { const acc_t a = a_; const acc_t b = b_; const acc_t one = acc_t(1); const acc_t sigmoid = one / (one + std::exp(-b)); return a * sigmoid; }); }); } // ----------------------------------- // glu backward // ----------------------------------- // Byte offsets don't require multiplication by sizeof(T), so are slightly cheaper. // For fixed offsets, this removes all penalty from 64-bit indexing. 
// Applies a displacement measured in BYTES to a typed pointer (no sizeof(T)
// multiply). const-ness of T is preserved via std::conditional.
template <typename T>
__device__ T* byte_offset(T* ptr, int64_t offset) {
  using byte_ptr_t = typename std::conditional<
    std::is_const<T>::value, const char*, char*>::type;
  return reinterpret_cast<T*>(
    reinterpret_cast<byte_ptr_t>(ptr) + offset
  );
}

// One thread per element of the FIRST input half. Writes both gradient halves:
//   gA = sigmoid(b) * gO
//   gB = (1 - sigmoid(b)) * sigmoid(b) * gO * a
// where b is read at a fixed byte displacement from a's location.
template <typename scalar_t, typename OffsetCalc>
__global__ void glu_backward_kernel(
    int numel, scalar_t* gI, const scalar_t* I, const scalar_t* gO,
    OffsetCalc offset_calculator, int64_t gI_byte_offset, int64_t I_byte_offset) {
  using acc_t = at::acc_type<scalar_t, true>;

  const uint32_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  // tail guard: the grid may overshoot numel
  if (linear_index >= numel) {
    return;
  }
  const auto offsets = offset_calculator.get(linear_index);

  // We explicitly iterate over the first half of the input tensor, and
  // gI_byte_offset and I_byte_offset are the offsets to access the
  // corresponding index in the second half of the tensor.
  const acc_t a = I[offsets[1]];
  const acc_t b = *byte_offset(I + offsets[1], I_byte_offset);
  const acc_t gO_val = gO[offsets[2]];

  const auto one = acc_t(1);
  const acc_t sigmoid = one / (one + std::exp(-b));

  auto* gA = gI + offsets[0];
  *gA = sigmoid * gO_val;

  auto* gB = byte_offset(gA, gI_byte_offset);
  *gB = (one - sigmoid) * sigmoid * gO_val * a;
}

// Host launcher for glu_backward_kernel. numel must fit in int32 (debug-checked)
// because the kernel indexes with uint32_t. Strides are converted to BYTE
// offsets before launch.
void launch_glu_backward_kernel(const TensorIteratorBase& iter,
                                int64_t gI_stride, int64_t I_stride) {
  const auto N = iter.numel();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(N > 0 && N <= std::numeric_limits<int32_t>::max());
  const auto offset_calculator = make_element_offset_calculator<3>(iter);
  constexpr int64_t block_size = 256;
  const int64_t grid = (N + block_size - 1) / block_size;  // ceil-div
  const auto stream = at::cuda::getCurrentCUDAStream();

  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "glu_backward_cuda", [&] {
    auto gI = static_cast<scalar_t*>(iter.data_ptr(0));
    auto I = static_cast<const scalar_t*>(iter.data_ptr(1));
    auto gO = static_cast<const scalar_t*>(iter.data_ptr(2));
    glu_backward_kernel<<<grid, block_size, 0, stream>>>(
        N, gI, I, gO, offset_calculator,
        gI_stride * sizeof(scalar_t), I_stride * sizeof(scalar_t));
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}

// -----------------------------------
// log_sigmoid forward
// -----------------------------------

// log(sigmoid(x)) in the numerically stable form min(0, x) - log1p(exp(-|x|)).
void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(), "log_sigmoid_forward_cuda", [&] {
    using acc_t = acc_type<scalar_t, true>;
    gpu_kernel(iter, [] GPU_LAMBDA (scalar_t in_) -> scalar_t {
      const acc_t in = in_;
      const auto min = std::min(acc_t(0), in);
      const auto z = std::exp(-std::abs(in));
      return min - std::log1p(z);
    });
  });
}

// -----------------------------------
// log_sigmoid backward
// -----------------------------------

// d/dx log(sigmoid(x)) * grad_out, stable form:
// max_deriv - sign * z / (1 + z), with z = exp(-|x|).
void log_sigmoid_backward_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.common_dtype(), "log_sigmoid_backward_cuda", [&] {
    using acc_t = acc_type<scalar_t, true>;
    gpu_kernel(iter, [] GPU_LAMBDA (scalar_t in_, scalar_t grad_out_) -> scalar_t {
      const acc_t in = in_;
      const acc_t grad_out = grad_out_;

      auto in_negative = in < acc_t(0);
      auto max_deriv = in_negative ? acc_t(1) : acc_t(0);
      auto sign = in_negative ? acc_t(1) : -acc_t(1);
      const auto z = std::exp(-std::abs(in));
      return grad_out * (max_deriv - sign * (z / (acc_t(1) + z)));
    });
  });
}

// -----------------------------------
// prelu forward
// -----------------------------------

// PReLU with one shared weight: out = x > 0 ? x : w * x.
// weight_data is a device pointer dereferenced inside the lambda.
void launch_prelu_cuda_kernel_share_weights(TensorIteratorBase &iter, const TensorBase &weight) {
  AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.input_dtype(), "prelu_cuda", [&] {
    const auto *weight_data = weight.data_ptr<scalar_t>();
    at::native::gpu_kernel(iter,
        [weight_data] GPU_LAMBDA (scalar_t input_val) {
          return (input_val > 0) ? input_val : *weight_data * input_val;
        });
  });
}

// Per-channel PReLU. The channel is recovered from the linear index using
// the input's (contiguous) stride layout: channel = (i % stride0) / stride1.
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
  scalar_t* result_data,
  const scalar_t* input_data,
  const scalar_t* weight_data,
  int64_t input_stride0,
  int64_t input_stride1,
  int64_t input_numel) {

  int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
  if (linearId >= input_numel) return;

  // multiply values at each channel with weight[channel_index]
  int64_t channel = (linearId % input_stride0) / input_stride1;
  scalar_t input_data_val = input_data[linearId];
  result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}

// Host launcher for per-channel PReLU. Validates weight count == channel
// dimension (dim 1 of input) and sizes the launch with getApplyBlock/Grid.
void launch_prelu_cuda_kernel_multi_weights(
    const TensorBase &result, const TensorBase &input, const TensorBase &weight) {
  int64_t input_ndim = input.dim();
  TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");

  int64_t channel_size = 1; // channel_size default to 1
  int64_t input_stride0 = 1, input_stride1 = 1;

  if (input_ndim > 1) {
    channel_size = input.size(1); // channel is the 2nd dim of input
    auto strides = input.strides();
    input_stride0 = strides[0];
    input_stride1 = strides[1];
  }
  const int64_t weight_num = weight.numel();
  TORCH_CHECK(channel_size == weight_num,
    "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
    " and channel size = ", channel_size, ".");

  // config to run cuda kernel
  int64_t input_numel = input.numel();
  const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
  dim3 grid;
  int curDevice = -1;
  cudaGetDevice(&curDevice);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
  TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");

  AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
    prelu_cuda_kernel_multi_weights<scalar_t>
    <<<grid, block, 0, stream>>>(
      result.data_ptr<scalar_t>(),
      input.data_ptr<scalar_t>(),
      weight.data_ptr<scalar_t>(),
      input_stride0,
      input_stride1,
      input_numel);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}

// -----------------------------------
// prelu backward
// -----------------------------------

// Shared-weight PReLU backward: emits the input gradient and a per-element
// weight-gradient contribution (reduced to a scalar by the caller).
void launch_prelu_cuda_backward_kernel_share_weights(
    TensorIteratorBase &iter, const TensorBase &weight) {
  // N.B. `std::tuple` does not support `::operator=` on device code.
  AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.input_dtype(), "prelu_backward_cuda", [&] {
    const auto *weight_data = weight.data_ptr<scalar_t>();
    gpu_kernel_multiple_outputs(iter,
        [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
          scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
          scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
          return {input_grad, weight_grad_collector};
        });
  });
}

// Per-channel PReLU backward; same channel-from-linear-index scheme as the
// forward kernel. weight_grad_collector is reduced per channel by the caller.
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
  const scalar_t* input_data,
  const scalar_t* weight_data,
  const scalar_t* grad_out_data,
  scalar_t* input_grad_data,
  scalar_t* weight_grad_collector,
  int64_t input_stride0,
  int64_t input_stride1,
  int64_t input_numel) {

  int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
  if (linearId >= input_numel) return;
  int64_t channel = (linearId % input_stride0) / input_stride1;
  scalar_t input_data_val = input_data[linearId];
  scalar_t grad_out_data_val = grad_out_data[linearId];
  input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
  weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}

// Host launcher for per-channel PReLU backward; mirrors the forward launcher.
void launch_prelu_cuda_backward_kernel_multi_weights(
    const TensorBase &input, const TensorBase &weight, const TensorBase &grad_out,
    const TensorBase &input_grad, const TensorBase &weight_grad_collector) {
  int64_t input_ndim = input.dim();
  TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");

  int64_t channel_size = 1; // channel_size default to 1
  int64_t input_stride0 = 1, input_stride1 = 1;

  if (input_ndim > 1) {
    channel_size = input.size(1); // channel is the 2nd dim of input
    auto strides = input.strides();
    input_stride0 = strides[0];
    input_stride1 = strides[1];
  }
  const int64_t weight_num = weight.numel();
  TORCH_CHECK(channel_size == weight_num,
    "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
    " and channel size = ", channel_size, ".");

  // config to run cuda kernel
  int64_t input_numel = input.numel();
  const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
  dim3 grid;
  int curDevice = -1;
  cudaGetDevice(&curDevice);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
  TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");

  AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
    prelu_cuda_backward_kernel_multi_weights<scalar_t>
    <<<grid, block, 0, stream>>>(
      input.data_ptr<scalar_t>(),
      weight.data_ptr<scalar_t>(),
      grad_out.data_ptr<scalar_t>(),
      input_grad.data_ptr<scalar_t>(),
      weight_grad_collector.data_ptr<scalar_t>(),
      input_stride0,
      input_stride1,
      input_numel);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}

// -----------------------------------
// hardshrink
// -----------------------------------

// hardshrink: zero inside [-lambd, lambd], identity outside.
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
    auto lambd = value.to<scalar_t>();
    gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
      return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
    });
  });
}

// softshrink: shrink magnitude toward zero by lambd; zero inside the band.
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
    auto lambd = value.to<scalar_t>();
    gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
      return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
    });
  });
}

// Backward shared by hardshrink/softshrink: gradient is zeroed inside
// [-lambd, lambd] and passed through outside.
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
    auto lambd = value.to<scalar_t>();
    gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
      return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
    });
  });
}

// hardtanh backward: zero gradient where the input was clamped
// (b <= min or b >= max), pass-through elsewhere.
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
  AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
    auto min_val = min.to<scalar_t>();
    auto max_val = max.to<scalar_t>();
    gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
    });
  });
}

// softplus: log1p(exp(beta*x)) / beta; identity once beta*x > threshold
// (avoids overflow in exp).
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
    auto beta = beta_.to<scalar_t>();
    auto threshold = threshold_.to<scalar_t>();
    gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
      return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
    });
  });
}

// softplus backward: grad * z / (z + 1) with z = exp(b*beta) (i.e.
// grad * sigmoid(beta*b)); identity in the linear regime above threshold.
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
    auto beta = beta_.to<scalar_t>();
    auto threshold = threshold_.to<scalar_t>();
    gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      scalar_t z = std::exp(b * beta);
      return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
    });
  });
}

// threshold: `value` wherever x <= threshold, otherwise the second operand.
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
  gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
    return x <= threshold ? value : other;
  });
}

static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
    threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
  });
}

// elu: scale * x for x > 0, scale*alpha*(exp(input_scale*x) - 1) otherwise.
// Coefficients are folded into negcoef/poscoef/negiptcoef once per dispatch.
void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
    auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
    auto poscoef = scale.to<scalar_t>();
    auto negiptcoef = input_scale.to<scalar_t>();
    gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
      return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
    });
  });
}

// elu backward; `is_result` selects whether b is the forward OUTPUT
// (in-place-friendly form, uses b + negcoef) or the original input
// (recomputes exp(b * negiptcoef)).
void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
    auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
    auto poscoef = scale.to<scalar_t>();
    auto negiptcoef = input_scale.to<scalar_t>();
    gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      if (is_result) {
        return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
      } else {
        return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
      }
    });
  });
}

// GELU forward (exact erf formulation): x * Phi(x).
void GeluCUDAKernelImpl(TensorIteratorBase& it) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
      return static_cast<T_ACC>(x) * c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
    });
  });
}

// GELU backward: dy * (Phi(x) + x * phi(x)); kBeta = 1/sqrt(2*pi) builds the
// standard normal pdf from exp(-x^2/2).
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
      constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
      const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
      const T_ACC pdf = c10::cuda::compat::exp(
          T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) * kBeta;
      return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
    });
  });
}

namespace {

// leaky relu: x for x > 0, negval * x otherwise.
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
    auto negval = negval_.to<scalar_t>();
    gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
      return a > scalar_t(0) ? a : a * negval;
    });
  });
}

// leaky relu backward: a is the input, b the incoming gradient.
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
    auto negval = negval_.to<scalar_t>();
    gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      return a > scalar_t(0) ? b : b * negval;
    });
  });
}

// hardswish: x * clamp(x + 3, 0, 6) / 6.
void hardswish_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    const T_ACC zero(0.0f);
    const T_ACC one_sixth(1.0f / 6.0f);
    const T_ACC three(3.0f);
    const T_ACC six(6.0f);
    gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
      T_ACC x = static_cast<T_ACC>(self_val);
      return x * std::min(std::max(x + three, zero), six) * one_sixth;
    });
  });
}

// hardswish backward: 0 below -3, grad above 3, grad * (x/3 + 1/2) in between.
void hardswish_backward_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    const T_ACC zero(0.0f);
    const T_ACC three(3.0f);
    const T_ACC neg_three(-3.0f);
    const T_ACC one_half(0.5f);
    gpu_kernel(
      iter,
      [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
        T_ACC grad_val = static_cast<T_ACC>(grad_val_);
        T_ACC self_val = static_cast<T_ACC>(self_val_);
        if (self_val < neg_three) {
          return zero;
        } else if (self_val <= three) {
          return grad_val * ((self_val / three) + one_half);
        } else {
          return grad_val;
        }
      });
  });
}

// hardsigmoid: clamp(x + 3, 0, 6) / 6.
void hardsigmoid_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    const T_ACC zero(0.0f);
    const T_ACC one_sixth(1.0f / 6.0f);
    const T_ACC three(3.0f);
    const T_ACC six(6.0f);
    gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
      T_ACC x = static_cast<T_ACC>(self_val);
      return std::min(std::max(x + three, zero), six) * one_sixth;
    });
  });
}

// hardsigmoid backward: grad / 6 strictly inside (-3, 3), zero outside.
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
    using T_ACC = acc_type<scalar_t, true>;
    const T_ACC zero(0.0f);
    const T_ACC three(3.0f);
    const T_ACC neg_three(-3.0f);
    const T_ACC one_sixth(1.0f / 6.0f);
    gpu_kernel(
      iter,
      [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
        T_ACC grad_val = static_cast<T_ACC>(grad_val_);
        T_ACC self_val = static_cast<T_ACC>(self_val_);
        return (self_val > neg_three && self_val < three)
          ? grad_val * one_sixth
          : zero;
      });
  });
}

// silu (swish): x * sigmoid(x).
void silu_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "silu_cuda",
      [&]() {
        gpu_kernel(
            iter,
            [] GPU_LAMBDA(scalar_t x) -> scalar_t {
              using T_ACC = acc_type<scalar_t, true>;
              const T_ACC x_acc = static_cast<T_ACC>(x);
              return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
            });
      });
}

// silu backward: dy * s * (1 + x * (1 - s)), s = sigmoid(x).
void silu_backward_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "silu_backward_cuda",
      [&]() {
        gpu_kernel(
            iter,
            [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
              using T_ACC = acc_type<scalar_t, true>;
              const T_ACC dy_acc = static_cast<T_ACC>(dy);
              const T_ACC x_acc = static_cast<T_ACC>(x);
              const T_ACC s_acc =
                  T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
              return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
            });
      });
}

// mish: x * tanh(softplus(x)).
void mish_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_cuda",
      [&]() {
        gpu_kernel(
            iter,
            [] GPU_LAMBDA(scalar_t x) -> scalar_t {
              using T_ACC = acc_type<scalar_t, true>;
              const T_ACC x_acc = static_cast<T_ACC>(x);
              return x_acc * c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
            });
      });
}

// mish backward: dy * (t + x * s * (1 - t^2)), with t = tanh(softplus(x)),
// s = sigmoid(x).
void mish_backward_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_backward_cuda",
      [&]() {
        gpu_kernel(
            iter,
            [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
              using T_ACC = acc_type<scalar_t, true>;
              const T_ACC dy_acc = static_cast<T_ACC>(dy);
              const T_ACC x_acc = static_cast<T_ACC>(x);
              const T_ACC s_acc =
                  T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
              const T_ACC t_acc =
                  c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
              return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
            });
      });
}

} // namespace

// Dispatch-table registrations: route the device-agnostic stubs to the CUDA
// implementations above.
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(glu_stub, &glu_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);

} // namespace native
} // namespace at
the_stack
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <vector>

// Grid-stride loop over n work items (1D launch).
#define CUDA_1D_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

// Poses are quaternions: 4 channels (s, u, v, w) per class.
#define POSE_CHANNELS 4

// Forward pass of the pose matching ("average distance") loss.
// One thread per (batch sample n, model point p). For the active class of
// sample n (the one whose weight is > 0 — assumes at most one per sample,
// since the search breaks on the first hit; TODO confirm with caller), the
// thread:
//   1. builds the ground-truth and predicted rotation matrices from the
//      quaternions, plus the four derivative matrices dR/ds..dR/dw;
//   2. (p == 0 only) flags the sample in angles_batch when the quaternion
//      angular error acos(2<q_gt,q_pred>^2 - 1) exceeds hard_angle degrees;
//   3. rotates model point p by both matrices, and for symmetric objects
//      replaces the ground-truth point with the closest rotated model point;
//   4. accumulates a smooth-L1 distance into losses[n,p] and its quaternion
//      gradient into diffs[n,p,:] (each thread owns a distinct diffs slice,
//      so the += accumulation is race-free).
//
// rotations layout per (n, p): six consecutive 3x3 row-major matrices —
//   [0..8] gt R, [9..17] predicted R, [18..26] dR/ds, [27..35] dR/du,
//   [36..44] dR/dv, [45..53] dR/dw.
template <typename Dtype>
__global__ void AveragedistanceForward(const int nthreads, const Dtype* prediction, const Dtype* target,
    const Dtype* weight, const Dtype* point, const Dtype* symmetry, const int batch_size,
    const int num_classes, const int num_points, const float hard_angle,
    Dtype* rotations, Dtype* losses, Dtype* diffs, Dtype* angles_batch)
{
  CUDA_1D_KERNEL_LOOP(index_thread, nthreads)
  {
    // batch index
    int n = index_thread / num_points;
    int p = index_thread % num_points;

    // find the class label and pose of this object
    int index_cls = -1, ind;
    Dtype s, u, v, w;
    for (int i = 0; i < POSE_CHANNELS * num_classes; i += POSE_CHANNELS)
    {
      int index = n * POSE_CHANNELS * num_classes + i;
      if (weight[index] > 0)
      {
        index_cls = i / POSE_CHANNELS;

        // gt quaternion
        s = target[index + 0];
        u = target[index + 1];
        v = target[index + 2];
        w = target[index + 3];

        // gt rotation matrix
        ind = n * num_points * 6 * 9 + p * 6 * 9;
        rotations[ind + 0] = s * s + u * u - v * v - w * w;
        rotations[ind + 1] = 2 * (u * v - s * w);
        rotations[ind + 2] = 2 * (u * w + s * v);
        rotations[ind + 3] = 2 * (u * v + s * w);
        rotations[ind + 4] = s * s - u * u + v * v - w * w;
        rotations[ind + 5] = 2 * (v * w - s * u);
        rotations[ind + 6] = 2 * (u * w - s * v);
        rotations[ind + 7] = 2 * (v * w + s * u);
        rotations[ind + 8] = s * s - u * u - v * v + w * w;

        // predicted quaternion (note: s,u,v,w keep these values after the
        // loop, so the derivative matrices below are w.r.t. the prediction)
        s = prediction[index + 0];
        u = prediction[index + 1];
        v = prediction[index + 2];
        w = prediction[index + 3];

        // predicted rotation matrix
        ind = n * num_points * 6 * 9 + p * 6 * 9 + 9;
        rotations[ind + 0] = s * s + u * u - v * v - w * w;
        rotations[ind + 1] = 2 * (u * v - s * w);
        rotations[ind + 2] = 2 * (u * w + s * v);
        rotations[ind + 3] = 2 * (u * v + s * w);
        rotations[ind + 4] = s * s - u * u + v * v - w * w;
        rotations[ind + 5] = 2 * (v * w - s * u);
        rotations[ind + 6] = 2 * (u * w - s * v);
        rotations[ind + 7] = 2 * (v * w + s * u);
        rotations[ind + 8] = s * s - u * u - v * v + w * w;

        // compute the angular distance between quaternions (once per sample)
        if (p == 0)
        {
          Dtype d = target[index + 0] * prediction[index + 0] + target[index + 1] * prediction[index + 1]
                  + target[index + 2] * prediction[index + 2] + target[index + 3] * prediction[index + 3];
          Dtype angle = acos(2 * d * d - 1) * 180.0 / 3.14159265;
          if (angle > hard_angle)
            angles_batch[n] = 1.0;
        }
        break;
      }
    }
    // no labeled class for this sample: nothing to do
    if (index_cls == -1)
      continue;

    // derivatives of Ru to quaternion (dR/ds)
    ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
    rotations[ind + 0] = 2 * s;
    rotations[ind + 1] = -2 * w;
    rotations[ind + 2] = 2 * v;
    rotations[ind + 3] = 2 * w;
    rotations[ind + 4] = 2 * s;
    rotations[ind + 5] = -2 * u;
    rotations[ind + 6] = -2 * v;
    rotations[ind + 7] = 2 * u;
    rotations[ind + 8] = 2 * s;

    // dR/du
    ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
    rotations[ind + 0] = 2 * u;
    rotations[ind + 1] = 2 * v;
    rotations[ind + 2] = 2 * w;
    rotations[ind + 3] = 2 * v;
    rotations[ind + 4] = -2 * u;
    rotations[ind + 5] = -2 * s;
    rotations[ind + 6] = 2 * w;
    rotations[ind + 7] = 2 * s;
    rotations[ind + 8] = -2 * u;

    // dR/dv
    ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
    rotations[ind + 0] = -2 * v;
    rotations[ind + 1] = 2 * u;
    rotations[ind + 2] = 2 * s;
    rotations[ind + 3] = 2 * u;
    rotations[ind + 4] = 2 * v;
    rotations[ind + 5] = 2 * w;
    rotations[ind + 6] = -2 * s;
    rotations[ind + 7] = 2 * w;
    rotations[ind + 8] = -2 * v;

    // dR/dw
    ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
    rotations[ind + 0] = -2 * w;
    rotations[ind + 1] = -2 * s;
    rotations[ind + 2] = 2 * u;
    rotations[ind + 3] = 2 * s;
    rotations[ind + 4] = -2 * w;
    rotations[ind + 5] = 2 * v;
    rotations[ind + 6] = 2 * u;
    rotations[ind + 7] = 2 * v;
    rotations[ind + 8] = 2 * w;

    // for the point
    int index = index_cls * num_points * 3 + p * 3;
    ind = n * num_points * 6 * 9 + p * 6 * 9;

    // rotate the first point by the predicted rotation (matrix at offset +9)
    Dtype x1 = rotations[ind + 9 + 0] * point[index + 0] + rotations[ind + 9 + 1] * point[index + 1] + rotations[ind + 9 + 2] * point[index + 2];
    Dtype y1 = rotations[ind + 9 + 3] * point[index + 0] + rotations[ind + 9 + 4] * point[index + 1] + rotations[ind + 9 + 5] * point[index + 2];
    Dtype z1 = rotations[ind + 9 + 6] * point[index + 0] + rotations[ind + 9 + 7] * point[index + 1] + rotations[ind + 9 + 8] * point[index + 2];

    int index_min;
    Dtype x2, y2, z2;
    if (symmetry[index_cls] > 0)
    {
      // find the closest point for a symmetric object (always assigns
      // index_min on the first iteration since dmin starts at FLT_MAX)
      Dtype dmin = FLT_MAX;
      for (int i = 0; i < num_points; i++)
      {
        int index2 = index_cls * num_points * 3 + i * 3;
        x2 = rotations[ind + 0] * point[index2 + 0] + rotations[ind + 1] * point[index2 + 1] + rotations[ind + 2] * point[index2 + 2];
        y2 = rotations[ind + 3] * point[index2 + 0] + rotations[ind + 4] * point[index2 + 1] + rotations[ind + 5] * point[index2 + 2];
        z2 = rotations[ind + 6] * point[index2 + 0] + rotations[ind + 7] * point[index2 + 1] + rotations[ind + 8] * point[index2 + 2];
        Dtype distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
        if (distance < dmin)
        {
          dmin = distance;
          index_min = index2;
        }
      }
    }
    else
      index_min = index;

    // rotate the matched point by the ground-truth rotation
    x2 = rotations[ind + 0] * point[index_min + 0] + rotations[ind + 1] * point[index_min + 1] + rotations[ind + 2] * point[index_min + 2];
    y2 = rotations[ind + 3] * point[index_min + 0] + rotations[ind + 4] * point[index_min + 1] + rotations[ind + 5] * point[index_min + 2];
    z2 = rotations[ind + 6] * point[index_min + 0] + rotations[ind + 7] * point[index_min + 1] + rotations[ind + 8] * point[index_min + 2];

    // smooth l1 loss
    Dtype distance = 0;
    int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + POSE_CHANNELS * index_cls;
    for (int j = 0; j < 3; j++)
    {
      Dtype diff, df;
      if (j == 0)
        diff = x1 - x2;
      else if (j == 1)
        diff = y1 - y2;
      else
        diff = z1 - z2;
      if (fabs(diff) < 1)
      {
        // quadratic zone: 0.5 * diff^2, derivative diff
        distance += 0.5 * diff * diff;
        df = diff;
      }
      else
      {
        // linear zone: |diff| - 0.5, derivative sign(diff)
        distance += fabs(diff) - 0.5;
        if (diff > 0)
          df = 1.0;
        else
          df = -1.0;
      }
      // chain rule through each derivative matrix; one quaternion channel
      // per matrix (18 -> ds, 27 -> du, 36 -> dv, 45 -> dw)
      for (int k = 0; k < 3; k++)
      {
        ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
        diffs[index_diff + 0] += df * point[index + k] * rotations[ind + j * 3 + k] / num_points;

        ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
        diffs[index_diff + 1] += df * point[index + k] * rotations[ind + j * 3 + k] / num_points;

        ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
        diffs[index_diff + 2] += df * point[index + k] * rotations[ind + j * 3 + k] / num_points;

        ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
        diffs[index_diff + 3] += df * point[index + k] * rotations[ind + j * 3 + k] / num_points;
      }
    }
    losses[index_thread] = distance / num_points;
  }
}

// Reduces per-point losses/gradients over points. One thread per
// (sample n, quaternion channel c). Only "hard" samples (angles[n] > 0)
// contribute; both sums are normalized by batch_hard, the number of hard
// samples in the batch. The per-sample loss is produced once (on c == 0).
template <typename Dtype>
__global__ void sum_losses_gradients(const int nthreads, const Dtype* losses, const Dtype* diffs,
    const int num_classes, const int num_points, const float batch_hard,
    Dtype* angles, Dtype* loss_batch, Dtype* bottom_diff)
{
  CUDA_1D_KERNEL_LOOP(index, nthreads)
  {
    int n = index / (POSE_CHANNELS * num_classes);
    int c = index % (POSE_CHANNELS * num_classes);

    bottom_diff[index] = 0;
    if (angles[n] > 0)
    {
      for (int p = 0; p < num_points; p++)
      {
        int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + c;
        bottom_diff[index] += diffs[index_diff] / batch_hard;
      }
    }

    if (c == 0)
    {
      loss_batch[n] = 0;
      if (angles[n] > 0)
      {
        for (int p = 0; p < num_points; p++)
          loss_batch[n] += losses[n * num_points + p] / batch_hard;
      }
    }
  }
}

// Host entry for the forward pass. Returns {scalar loss tensor, gradient
// tensor cached for the backward pass}. Uses thrust::reduce on the default
// stream for the two scalar reductions; cudaDeviceSynchronize() after each
// launch makes the subsequent host-side reductions safe.
// NOTE(review): Tensor::data<T>() is the deprecated accessor (data_ptr<T>()
// is the modern spelling); kept as-is here.
std::vector<at::Tensor> pml_cuda_forward(
    at::Tensor bottom_prediction,
    at::Tensor bottom_target,
    at::Tensor bottom_weight,
    at::Tensor points,
    at::Tensor symmetry,
    float hard_angle)
{
  // run kernels
  cudaError_t err;
  const int kThreadsPerBlock = 512;
  int output_size;

  // temp losses
  const int batch_size = bottom_prediction.size(0);
  const int num_classes = points.size(1);
  const int num_points = points.size(2);
  auto losses = at::zeros({batch_size, num_points}, points.options());
  auto losses_batch = at::zeros({batch_size}, points.options());
  auto angles_batch = at::zeros({batch_size}, points.options());
  auto top_data = at::zeros({1}, points.options());

  // temp diffs
  auto diffs = at::zeros({batch_size, num_points, POSE_CHANNELS * num_classes}, points.options());
  auto bottom_diff = at::zeros({batch_size, POSE_CHANNELS * num_classes}, points.options());

  // temp rotations
  auto rotations = at::zeros({batch_size, num_points, 6 * 9}, points.options());

  // compute the losses and gradients
  output_size = batch_size * num_points;
  AveragedistanceForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
      output_size, bottom_prediction.data<float>(), bottom_target.data<float>(),
      bottom_weight.data<float>(), points.data<float>(), symmetry.data<float>(),
      batch_size, num_classes, num_points, hard_angle,
      rotations.data<float>(), losses.data<float>(), diffs.data<float>(), angles_batch.data<float>());
  cudaDeviceSynchronize();

  // sum the angle flags: number of "hard" samples in the batch
  thrust::device_ptr<float> angles_ptr(angles_batch.data<float>());
  float batch_hard = thrust::reduce(angles_ptr, angles_ptr + batch_size);

  err = cudaGetLastError();
  if(cudaSuccess != err)
  {
    fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
    exit( -1 );
  }

  // sum the diffs
  output_size = batch_size * POSE_CHANNELS * num_classes;
  sum_losses_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
      output_size, losses.data<float>(), diffs.data<float>(), num_classes, num_points, batch_hard,
      angles_batch.data<float>(), losses_batch.data<float>(), bottom_diff.data<float>());
  cudaDeviceSynchronize();

  // sum the loss and copy the host scalar back into the device output tensor
  thrust::device_ptr<float> losses_ptr(losses_batch.data<float>());
  float loss = thrust::reduce(losses_ptr, losses_ptr + batch_size);
  cudaMemcpy(top_data.data<float>(), &loss, sizeof(float), cudaMemcpyHostToDevice);

  err = cudaGetLastError();
  if(cudaSuccess != err)
  {
    fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
    exit( -1 );
  }

  return {top_data, bottom_diff};
}

// Backward: scale the cached per-channel gradients by the (scalar) upstream
// gradient of the loss.
template <typename Dtype>
__global__ void AveragedistanceBackward(const int nthreads, const Dtype* top_diff,
    const Dtype* bottom_diff, Dtype* output)
{
  CUDA_1D_KERNEL_LOOP(index, nthreads)
  {
    output[index] = top_diff[0] * bottom_diff[index];
  }
}

// Host entry for the backward pass; returns the gradient w.r.t. the
// predicted quaternions.
std::vector<at::Tensor> pml_cuda_backward(
    at::Tensor grad_loss,
    at::Tensor bottom_diff)
{
  cudaError_t err;
  const int kThreadsPerBlock = 512;
  int output_size;
  const int batch_size = bottom_diff.size(0);
  const int num_classes = bottom_diff.size(1) / POSE_CHANNELS;
  auto grad_rotation = at::zeros({batch_size, POSE_CHANNELS * num_classes}, bottom_diff.options());

  output_size = batch_size * POSE_CHANNELS * num_classes;
  AveragedistanceBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
      output_size, grad_loss.data<float>(), bottom_diff.data<float>(), grad_rotation.data<float>());
  cudaDeviceSynchronize();

  err = cudaGetLastError();
  if(cudaSuccess != err)
  {
    fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
    exit( -1 );
  }

  return {grad_rotation};
}
the_stack
/* Deepwave scalar wave propagator (CUDA build). DEEPWAVE_TYPE, DEEPWAVE_DIM,
 * ZPAD/YPAD/XPAD, STRATEGY_COPY and wavefield_save_strategy come from
 * scalar.h. One of three dimension-specific sections is compiled, selected
 * by DEEPWAVE_DIM. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include "scalar.h"

/* Finite-difference coefficients in constant memory (first / second spatial
 * derivative stencils). Presumably filled from the host with
 * cudaMemcpyToSymbol outside this view -- the fd1_d/fd2_d host pointers
 * passed to propagate() below are unused in the visible code; verify. */
static __constant__ DEEPWAVE_TYPE fd1[2 * DEEPWAVE_DIM];
static __constant__ DEEPWAVE_TYPE fd2[2 * DEEPWAVE_DIM + 1];

/* Print-and-abort wrapper for CUDA API return codes. */
#define gpuErrchk(ans) \
  { gpuAssert((ans), __FILE__, __LINE__); }
static inline void gpuAssert(cudaError_t code, const char *file, int line,
                             bool abort = true)
{
  if (code != cudaSuccess) {
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
            line);
    if (abort) exit(code);
  }
}

/* Device helper forward declarations.
 * NOTE(review): this location_index declaration takes (arr, shape, index)
 * while the definitions later in the file take (arr, shape_y, shape_x,
 * index); the declared overload is never defined in the visible code --
 * confirm it is also never called. */
static inline __device__ DEEPWAVE_TYPE laplacian_1d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si);
static inline __device__ DEEPWAVE_TYPE laplacian_2d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x);
static inline __device__ DEEPWAVE_TYPE laplacian_3d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x, const ptrdiff_t size_xy);
static inline __device__ ptrdiff_t location_index(
    const ptrdiff_t * const arr, const ptrdiff_t * const shape,
    const ptrdiff_t index);
static inline __device__ DEEPWAVE_TYPE z_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_xy);
static inline __device__ DEEPWAVE_TYPE y_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x);
static inline __device__ DEEPWAVE_TYPE x_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si);

#if DEEPWAVE_DIM == 1

/* One time step of the 1D scalar wave equation with a PML absorbing
 * boundary. Launch layout: grid x covers depth (z), grid y covers shots;
 * each thread updates one cell, with the guard below handling the tail. */
__global__ void propagate_kernel(
    DEEPWAVE_TYPE * const wfn, DEEPWAVE_TYPE * const phizn,
    const DEEPWAVE_TYPE * const wfc, const DEEPWAVE_TYPE * const wfp,
    const DEEPWAVE_TYPE * const phizc, const DEEPWAVE_TYPE * const sigmaz,
    const DEEPWAVE_TYPE * const model, const ptrdiff_t shape_z,
    const ptrdiff_t numel_shot, const ptrdiff_t num_shots,
    const ptrdiff_t pmlz0, const ptrdiff_t pmlz1, const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  const ptrdiff_t z = blockIdx.x * blockDim.x + threadIdx.x + ZPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD)) {
    const ptrdiff_t i = z;                      /* cell index within a shot */
    const ptrdiff_t si = shot * numel_shot + i; /* index into wavefields */
    const DEEPWAVE_TYPE lap = laplacian_1d(wfc, si);
    if ((z >= pmlz0 + 2 * ZPAD) && (z < shape_z - pmlz1 - 2 * ZPAD)) {
      /* Update wavefield (interior: plain leapfrog time update) */
      wfn[si] = model[i] * lap + 2 * wfc[si] - wfp[si];
    } else {
      /* Inside PML region */
      const DEEPWAVE_TYPE wfc_z = z_deriv(wfc, si, 1);
      const DEEPWAVE_TYPE phizc_z = z_deriv(phizc, si, 1);
      /* Update wavefield */
      wfn[si] = 1 / (1 + dt * sigmaz[z] / 2) *
          (model[i] * (lap + phizc_z) + dt * sigmaz[z] * wfp[si] / 2 +
           (2 * wfc[si] - wfp[si]));
      /* Update phi */
      phizn[si] = phizc[si] - dt * sigmaz[z] * (wfc_z + phizc[si]);
    }
  }
}

/* Host wrapper: computes the launch geometry for one 1D time step and
 * launches propagate_kernel. aux holds the PML phi auxiliary field; sigma
 * is the PML damping profile. fd1_d/fd2_d are not used here. */
void propagate(DEEPWAVE_TYPE * const wfn, /* next wavefield */
               DEEPWAVE_TYPE * const auxn, /* next auxiliary */
               const DEEPWAVE_TYPE * const wfc, /* current wavefield */
               const DEEPWAVE_TYPE * const wfp, /* previous wavefield */
               const DEEPWAVE_TYPE * const auxc, /* current auxiliary */
               const DEEPWAVE_TYPE * const sigma,
               const DEEPWAVE_TYPE * const model,
               const DEEPWAVE_TYPE * const fd1_d, /* 1st diff coeffs */
               const DEEPWAVE_TYPE * const fd2_d, /* 2nd diff coeffs */
               const ptrdiff_t * const shape,
               const ptrdiff_t * const pml_width,
               const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t numel_shot = shape[0];
  DEEPWAVE_TYPE * const phizn = auxn;
  const DEEPWAVE_TYPE * const phizc = auxc;
  const DEEPWAVE_TYPE * const sigmaz = sigma;
  const dim3 dimBlock(32, 32, 1);
  /* ceil-divide the interior (padding excluded) over the block */
  const int gridx = (shape[0] - (2 * ZPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
  const int gridz = 1;
  const dim3 dimGrid(gridx, gridy, gridz);
  propagate_kernel<<<dimGrid, dimBlock>>>(
      wfn, phizn, wfc, wfp, phizc, sigmaz, model, shape[0], numel_shot,
      num_shots, pml_width[0], pml_width[1], dt);
  gpuErrchk(cudaPeekAtLastError());
}

/* Accumulate one time step's imaging-condition (model gradient)
 * contribution; atomicAdd because all shots write the same model cell. */
void __global__ imaging_condition_kernel(
    DEEPWAVE_TYPE * const model_grad,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield_t,
    const DEEPWAVE_TYPE * const
current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const sigmaz, const ptrdiff_t shape_z,
    const ptrdiff_t num_shots)
{
  const ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  const ptrdiff_t z = blockIdx.x * blockDim.x + threadIdx.x + ZPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD)) {
    const ptrdiff_t i = z;
    const ptrdiff_t si = shot * shape_z + i;
    /* cross-correlate the wavefield with the saved time derivatives */
    atomicAdd(model_grad + i,
              current_wavefield[si] *
                  (current_saved_wavefield_tt[si] +
                   sigmaz[z] * current_saved_wavefield_t[si]));
  }
}

/* Host wrapper for the 1D imaging condition. current_saved_wavefield is
 * accepted for interface parity with the 2D/3D builds but not forwarded to
 * the kernel (the 1D kernel has no sigmaz*sigmay cross term). */
void imaging_condition(
    DEEPWAVE_TYPE * const model_grad,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield_t,
    const DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const sigma, const ptrdiff_t * const shape,
    const ptrdiff_t * const pml_width, const ptrdiff_t num_shots)
{
  if (model_grad == NULL) return; /* Not doing model inversion */
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[0] - (2 * ZPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
  const int gridz = 1;
  const dim3 dimGrid(gridx, gridy, gridz);
  const DEEPWAVE_TYPE * const sigmaz = sigma;
  imaging_condition_kernel<<<dimGrid, dimBlock>>>(
      model_grad, current_wavefield, current_saved_wavefield_t,
      current_saved_wavefield_tt, sigmaz, shape[0], num_shots);
  gpuErrchk(cudaPeekAtLastError());
}

/* Inject the Born scattering source: d2/dt2(wavefield) * scatter. */
void __global__ add_scattering_kernel(
    DEEPWAVE_TYPE * const next_scattered_wavefield,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const DEEPWAVE_TYPE * const scatter, const ptrdiff_t shape_z,
    const ptrdiff_t num_shots)
{
  const ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  const ptrdiff_t z = blockIdx.x * blockDim.x + threadIdx.x + ZPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD)) {
    const ptrdiff_t i = z;
    const ptrdiff_t si = shot * shape_z + i;
    const DEEPWAVE_TYPE current_wavefield_tt =
        (next_wavefield[si] - 2 * current_wavefield[si] +
         previous_wavefield[si]); /* no dt^2 because of cancellation */
    next_scattered_wavefield[si] += current_wavefield_tt * scatter[i];
  }
}

/* Host wrapper: launch geometry for add_scattering_kernel (1D). */
void add_scattering(
    DEEPWAVE_TYPE * const next_scattered_wavefield,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const DEEPWAVE_TYPE * const scatter, const ptrdiff_t * const shape,
    const ptrdiff_t num_shots)
{
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[0] - (2 * ZPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
  const int gridz = 1;
  const dim3 dimGrid(gridx, gridy, gridz);
  add_scattering_kernel<<<dimGrid, dimBlock>>>(
      next_scattered_wavefield, next_wavefield, current_wavefield,
      previous_wavefield, scatter, shape[0], num_shots);
  gpuErrchk(cudaPeekAtLastError());
}

/* Save first/second time derivatives of the wavefield for the imaging
 * condition. NOTE(review): _t here is a backward difference
 * (current - previous)/dt, whereas the 3D build uses a centred
 * (next - previous)/2/dt -- confirm the asymmetry is intentional. */
void __global__ save_wavefields_kernel(
    DEEPWAVE_TYPE * const current_saved_wavefield_t,
    DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const ptrdiff_t shape_z, const ptrdiff_t num_shots,
    const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  const ptrdiff_t z = blockIdx.x * blockDim.x + threadIdx.x + ZPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD)) {
    const ptrdiff_t i = z;
    const ptrdiff_t si = shot * shape_z + i;
    current_saved_wavefield_t[si] =
        (current_wavefield[si] - previous_wavefield[si]) / dt;
    current_saved_wavefield_tt[si] =
        (next_wavefield[si] - 2 * current_wavefield[si] +
         previous_wavefield[si]) / dt / dt;
  }
}

/* Copy the current wavefield and save its time derivatives, but only under
 * STRATEGY_COPY; otherwise a no-op. */
void save_wavefields(DEEPWAVE_TYPE * const current_saved_wavefield,
                     DEEPWAVE_TYPE * const current_saved_wavefield_t,
                     DEEPWAVE_TYPE * const current_saved_wavefield_tt,
                     const DEEPWAVE_TYPE * const next_wavefield,
                     const DEEPWAVE_TYPE * const current_wavefield,
                     const DEEPWAVE_TYPE * const previous_wavefield,
                     const ptrdiff_t * const shape,
                     const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt,
                     const enum wavefield_save_strategy save_strategy)
{
  if (save_strategy == STRATEGY_COPY) {
    /* NOTE(review): this 1D build indexes shape[1] and shape[2] --
     * presumably shape always has three entries (unused dims == 1);
     * verify against the caller. */
    gpuErrchk(
        cudaMemcpy(current_saved_wavefield, current_wavefield,
                   num_shots * shape[0] * shape[1] * shape[2] *
                       sizeof(DEEPWAVE_TYPE),
                   cudaMemcpyDeviceToDevice));
    const dim3 dimBlock(32, 32, 1);
    const int gridx = (shape[0] - (2 * ZPAD) + dimBlock.x - 1) / dimBlock.x;
    const int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
    const int gridz = 1;
    const dim3 dimGrid(gridx, gridy, gridz);
    save_wavefields_kernel<<<dimGrid, dimBlock>>>(
        current_saved_wavefield_t, current_saved_wavefield_tt,
        next_wavefield, current_wavefield, previous_wavefield, shape[0],
        num_shots, dt);
    gpuErrchk(cudaPeekAtLastError());
  }
}

/* 1D: a location is a single z coordinate; shape_y/shape_x are unused. */
static inline __device__ ptrdiff_t location_index(const ptrdiff_t * const arr,
                                                  const ptrdiff_t shape_y,
                                                  const ptrdiff_t shape_x,
                                                  const ptrdiff_t index)
{
  const ptrdiff_t z = arr[index];
  return z;
}

/* 5-point second-derivative stencil in z using fd2 from constant memory. */
static inline __device__ DEEPWAVE_TYPE laplacian_1d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si)
{
  return fd2[0] * arr[si] + fd2[1] * (arr[si + 1] + arr[si - 1]) +
         fd2[2] * (arr[si + 2] + arr[si - 2]);
}

#elif DEEPWAVE_DIM == 2

/* One time step of the 2D scalar wave equation with PML. Launch layout:
 * grid x covers y-axis, grid y covers z-axis, grid z covers shots. */
__global__ void propagate_kernel(
    DEEPWAVE_TYPE * const wfn, DEEPWAVE_TYPE * const phizn,
    DEEPWAVE_TYPE * const phiyn, const DEEPWAVE_TYPE * const wfc,
    const DEEPWAVE_TYPE * const wfp, const DEEPWAVE_TYPE * const phizc,
    const DEEPWAVE_TYPE * const sigmaz, const DEEPWAVE_TYPE * const phiyc,
    const DEEPWAVE_TYPE * const sigmay, const DEEPWAVE_TYPE * const model,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t numel_shot, const ptrdiff_t num_shots,
    const ptrdiff_t pmlz0, const ptrdiff_t pmlz1, const ptrdiff_t pmly0,
    const ptrdiff_t pmly1, const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t shot = blockIdx.z * blockDim.z + threadIdx.z;
  const ptrdiff_t z = blockIdx.y * blockDim.y + threadIdx.y + ZPAD;
const ptrdiff_t y = blockIdx.x * blockDim.x + threadIdx.x + YPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)) {
    const ptrdiff_t i = z * shape_y + y;        /* row-major (z, y) */
    const ptrdiff_t si = shot * numel_shot + i;
    const DEEPWAVE_TYPE lap = laplacian_2d(wfc, si, shape_y);
    if ((z >= pmlz0 + 2 * ZPAD) && (z < shape_z - pmlz1 - 2 * ZPAD) &&
        (y >= pmly0 + 2 * YPAD) && (y < shape_y - pmly1 - 2 * YPAD)) {
      /* Update wavefield */
      wfn[si] = model[i] * lap + 2 * wfc[si] - wfp[si];
    } else {
      /* Inside PML region */
      const DEEPWAVE_TYPE wfc_z = z_deriv(wfc, si, shape_y);
      const DEEPWAVE_TYPE phizc_z = z_deriv(phizc, si, shape_y);
      const DEEPWAVE_TYPE wfc_y = y_deriv(wfc, si, 1);
      const DEEPWAVE_TYPE phiyc_y = y_deriv(phiyc, si, 1);
      /* Update wavefield */
      wfn[si] = 1 / (1 + dt * (sigmaz[z] + sigmay[y]) / 2) *
          (model[i] * (lap + phizc_z + phiyc_y) +
           dt * (sigmaz[z] + sigmay[y]) * wfp[si] / 2 +
           (2 * wfc[si] - wfp[si]) -
           dt * dt * sigmaz[z] * sigmay[y] * wfc[si]);
      /* Update phi */
      phizn[si] = phizc[si] -
          dt * (sigmaz[z] * phizc[si] + (sigmaz[z] - sigmay[y]) * wfc_z);
      phiyn[si] = phiyc[si] -
          dt * (sigmay[y] * phiyc[si] + (sigmay[y] - sigmaz[z]) * wfc_y);
    }
  }
}

/* Host wrapper for one 2D time step. aux packs [phiz | phiy] per shot;
 * sigma packs [sigmaz (shape[0] entries) | sigmay]. fd1_d/fd2_d unused. */
void propagate(DEEPWAVE_TYPE * const wfn, /* next wavefield */
               DEEPWAVE_TYPE * const auxn, /* next auxiliary */
               const DEEPWAVE_TYPE * const wfc, /* current wavefield */
               const DEEPWAVE_TYPE * const wfp, /* previous wavefield */
               const DEEPWAVE_TYPE * const auxc, /* current auxiliary */
               const DEEPWAVE_TYPE * const sigma,
               const DEEPWAVE_TYPE * const model,
               const DEEPWAVE_TYPE * const fd1_d, /* 1st diff coeffs */
               const DEEPWAVE_TYPE * const fd2_d, /* 2nd diff coeffs */
               const ptrdiff_t * const shape,
               const ptrdiff_t * const pml_width,
               const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t numel_shot = shape[0] * shape[1];
  DEEPWAVE_TYPE * const phizn = auxn;
  const DEEPWAVE_TYPE * const phizc = auxc;
  const DEEPWAVE_TYPE * const sigmaz = sigma;
  DEEPWAVE_TYPE * const phiyn = auxn + num_shots * numel_shot;
  const DEEPWAVE_TYPE * const phiyc = auxc + num_shots * numel_shot;
  const DEEPWAVE_TYPE * const sigmay = sigma + shape[0];
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[1] - (2 * YPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (shape[0] - (2 * ZPAD) + dimBlock.y - 1) / dimBlock.y;
  const int gridz = (num_shots + dimBlock.z - 1) / dimBlock.z;
  const dim3 dimGrid(gridx, gridy, gridz);
  propagate_kernel<<<dimGrid, dimBlock>>>(
      wfn, phizn, phiyn, wfc, wfp, phizc, sigmaz, phiyc, sigmay, model,
      shape[0], shape[1], numel_shot, num_shots, pml_width[0], pml_width[1],
      pml_width[2], pml_width[3], dt);
  gpuErrchk(cudaPeekAtLastError());
}

/* 2D imaging condition: cross-correlate the wavefield with the saved time
 * derivatives (plus the sigmaz*sigmay PML cross term). atomicAdd because
 * all shots accumulate into the same model cell. */
void __global__ imaging_condition_kernel(
    DEEPWAVE_TYPE * const model_grad,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield_t,
    const DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const sigmaz, const DEEPWAVE_TYPE * const sigmay,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t numel_shot, const ptrdiff_t num_shots)
{
  const ptrdiff_t shot = blockIdx.z * blockDim.z + threadIdx.z;
  const ptrdiff_t z = blockIdx.y * blockDim.y + threadIdx.y + ZPAD;
  const ptrdiff_t y = blockIdx.x * blockDim.x + threadIdx.x + YPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)) {
    const ptrdiff_t i = z * shape_y + y;
    const ptrdiff_t si = shot * numel_shot + i;
    atomicAdd(model_grad + i,
              current_wavefield[si] *
                  (current_saved_wavefield_tt[si] +
                   (sigmaz[z] + sigmay[y]) * current_saved_wavefield_t[si] +
                   sigmaz[z] * sigmay[y] * current_saved_wavefield[si]));
  }
}

/* Host wrapper for the 2D imaging condition; no-op when model_grad is
 * NULL (no model inversion requested). */
void imaging_condition(
    DEEPWAVE_TYPE * const model_grad,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield,
    const DEEPWAVE_TYPE * const current_saved_wavefield_t,
    const DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const sigma, const ptrdiff_t * const shape,
    const
ptrdiff_t * const pml_width, const ptrdiff_t num_shots)
{
  if (model_grad == NULL) return; /* Not doing model inversion */
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[1] - (2 * YPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (shape[0] - (2 * ZPAD) + dimBlock.y - 1) / dimBlock.y;
  const int gridz = (num_shots + dimBlock.z - 1) / dimBlock.z;
  const dim3 dimGrid(gridx, gridy, gridz);
  const DEEPWAVE_TYPE * const sigmaz = sigma;
  const DEEPWAVE_TYPE * const sigmay = sigma + shape[0];
  imaging_condition_kernel<<<dimGrid, dimBlock>>>(
      model_grad, current_wavefield, current_saved_wavefield,
      current_saved_wavefield_t, current_saved_wavefield_tt, sigmaz, sigmay,
      shape[0], shape[1], shape[0] * shape[1], num_shots);
  gpuErrchk(cudaPeekAtLastError());
}

/* Inject the Born scattering source (2D): d2/dt2(wavefield) * scatter. */
void __global__ add_scattering_kernel(
    DEEPWAVE_TYPE * const next_scattered_wavefield,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const DEEPWAVE_TYPE * const scatter, const ptrdiff_t shape_z,
    const ptrdiff_t shape_y, const ptrdiff_t numel_shot,
    const ptrdiff_t num_shots)
{
  const ptrdiff_t shot = blockIdx.z * blockDim.z + threadIdx.z;
  const ptrdiff_t z = blockIdx.y * blockDim.y + threadIdx.y + ZPAD;
  const ptrdiff_t y = blockIdx.x * blockDim.x + threadIdx.x + YPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)) {
    const ptrdiff_t i = z * shape_y + y;
    const ptrdiff_t si = shot * numel_shot + i;
    const DEEPWAVE_TYPE current_wavefield_tt =
        (next_wavefield[si] - 2 * current_wavefield[si] +
         previous_wavefield[si]); /* no dt^2 because of cancellation */
    next_scattered_wavefield[si] += current_wavefield_tt * scatter[i];
  }
}

/* Host wrapper: launch geometry for add_scattering_kernel (2D). */
void add_scattering(
    DEEPWAVE_TYPE * const next_scattered_wavefield,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const DEEPWAVE_TYPE * const scatter, const ptrdiff_t * const shape,
    const ptrdiff_t num_shots)
{
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[1] - (2 * YPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (shape[0] - (2 * ZPAD) + dimBlock.y - 1) / dimBlock.y;
  const int gridz = (num_shots + dimBlock.z - 1) / dimBlock.z;
  const dim3 dimGrid(gridx, gridy, gridz);
  add_scattering_kernel<<<dimGrid, dimBlock>>>(
      next_scattered_wavefield, next_wavefield, current_wavefield,
      previous_wavefield, scatter, shape[0], shape[1], shape[0] * shape[1],
      num_shots);
  gpuErrchk(cudaPeekAtLastError());
}

/* Save first/second time derivatives (2D). NOTE(review): _t is a backward
 * difference here, centred in the 3D build -- confirm intentional. */
void __global__ save_wavefields_kernel(
    DEEPWAVE_TYPE * const current_saved_wavefield_t,
    DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t numel_shot, const ptrdiff_t num_shots,
    const DEEPWAVE_TYPE dt)
{
  const ptrdiff_t shot = blockIdx.z * blockDim.z + threadIdx.z;
  const ptrdiff_t z = blockIdx.y * blockDim.y + threadIdx.y + ZPAD;
  const ptrdiff_t y = blockIdx.x * blockDim.x + threadIdx.x + YPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)) {
    const ptrdiff_t i = z * shape_y + y;
    const ptrdiff_t si = shot * numel_shot + i;
    current_saved_wavefield_t[si] =
        (current_wavefield[si] - previous_wavefield[si]) / dt;
    current_saved_wavefield_tt[si] =
        (next_wavefield[si] - 2 * current_wavefield[si] +
         previous_wavefield[si]) / dt / dt;
  }
}

/* Copy the current wavefield and save its time derivatives, but only under
 * STRATEGY_COPY; otherwise a no-op. */
void save_wavefields(DEEPWAVE_TYPE * const current_saved_wavefield,
                     DEEPWAVE_TYPE * const current_saved_wavefield_t,
                     DEEPWAVE_TYPE * const current_saved_wavefield_tt,
                     const DEEPWAVE_TYPE * const next_wavefield,
                     const DEEPWAVE_TYPE * const current_wavefield,
                     const DEEPWAVE_TYPE * const previous_wavefield,
                     const ptrdiff_t * const shape,
                     const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt,
                     const enum wavefield_save_strategy save_strategy)
{
  if (save_strategy == STRATEGY_COPY) {
    gpuErrchk(
        cudaMemcpy(current_saved_wavefield, current_wavefield,
                   num_shots * shape[0] * shape[1] * shape[2] *
                       sizeof(DEEPWAVE_TYPE),
                   cudaMemcpyDeviceToDevice));
    const dim3 dimBlock(32, 32, 1);
    const int gridx = (shape[1] - (2 * YPAD) + dimBlock.x - 1) / dimBlock.x;
    const int gridy = (shape[0] - (2 * ZPAD) + dimBlock.y - 1) / dimBlock.y;
    const int gridz = (num_shots + dimBlock.z - 1) / dimBlock.z;
    const dim3 dimGrid(gridx, gridy, gridz);
    save_wavefields_kernel<<<dimGrid, dimBlock>>>(
        current_saved_wavefield_t, current_saved_wavefield_tt,
        next_wavefield, current_wavefield, previous_wavefield, shape[0],
        shape[1], shape[0] * shape[1], num_shots, dt);
    gpuErrchk(cudaPeekAtLastError());
  }
}

/* 2D: locations are (z, y) pairs stored interleaved in arr. */
static inline __device__ ptrdiff_t location_index(const ptrdiff_t * const arr,
                                                  const ptrdiff_t shape_y,
                                                  const ptrdiff_t shape_x,
                                                  const ptrdiff_t index)
{
  const ptrdiff_t z = arr[index * 2];
  const ptrdiff_t y = arr[index * 2 + 1];
  return z * shape_y + y;
}

/* 2D Laplacian from the fd2 constant-memory stencil.
 * NOTE(review): "+ +fd2[3]" below is a harmless double unary plus; kept
 * byte-for-byte here, worth cleaning up in a code change. */
static inline __device__ DEEPWAVE_TYPE laplacian_2d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x)
{
  return fd2[0] * arr[si] +
         fd2[1] * (arr[si + size_x] + arr[si - size_x]) +
         fd2[2] * (arr[si + 2 * size_x] + arr[si - 2 * size_x]) +
         +fd2[3] * (arr[si + 1] + arr[si - 1]) +
         fd2[4] * (arr[si + 2] + arr[si - 2]);
}

#elif DEEPWAVE_DIM == 3

/* One time step of the 3D scalar wave equation with PML. Launch layout:
 * grid x covers x-axis, grid y covers y-axis, grid z is flattened over
 * (shot, z) pairs -- see the threadz decomposition in the body. */
__global__ void propagate_kernel(
    DEEPWAVE_TYPE * const wfn, DEEPWAVE_TYPE * const phizn,
    DEEPWAVE_TYPE * const phiyn, DEEPWAVE_TYPE * const phixn,
    DEEPWAVE_TYPE * const psin, const DEEPWAVE_TYPE * const wfc,
    const DEEPWAVE_TYPE * const wfp, const DEEPWAVE_TYPE * const phizc,
    const DEEPWAVE_TYPE * const sigmaz, const DEEPWAVE_TYPE * const phiyc,
    const DEEPWAVE_TYPE * const sigmay, const DEEPWAVE_TYPE * const phixc,
    const DEEPWAVE_TYPE * const sigmax, const DEEPWAVE_TYPE * const psic,
    const DEEPWAVE_TYPE * const model, const ptrdiff_t shape_z,
    const ptrdiff_t shape_y, const ptrdiff_t shape_x,
    const ptrdiff_t numel_shot, const ptrdiff_t size_xy,
    const ptrdiff_t num_shots, const
ptrdiff_t pmlz0, const ptrdiff_t pmlz1, const ptrdiff_t pmly0, const ptrdiff_t pmly1, const ptrdiff_t pmlx0, const ptrdiff_t pmlx1, const DEEPWAVE_TYPE dt) { const ptrdiff_t threadz = blockIdx.z * blockDim.z + threadIdx.z; const ptrdiff_t shot = threadz / (shape_z - ZPAD - ZPAD + 1); const ptrdiff_t z = threadz % (shape_z - ZPAD - ZPAD + 1) + ZPAD; const ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y + YPAD; const ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x + XPAD; if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD) && (x < shape_x - XPAD)) { const ptrdiff_t i = z * size_xy + y * shape_x + x; const ptrdiff_t si = shot * numel_shot + i; const DEEPWAVE_TYPE lap = laplacian_3d(wfc, si, size_xy, shape_x); if ((z >= pmlz0 + 2 * ZPAD) && (z < shape_z - pmlz1 - 2 * ZPAD) && (y >= pmly0 + 2 * YPAD) && (y < shape_y - pmly1 - 2 * YPAD) && (x >= pmlx0 + 2 * XPAD) && (x < shape_x - pmlx1 - 2 * XPAD)) { /* Update wavefield */ wfn[si] = model[i] * lap + 2 * wfc[si] - wfp[si]; } else { /* Inside PML region */ const DEEPWAVE_TYPE wfc_z = z_deriv(wfc, si, size_xy); const DEEPWAVE_TYPE wfc_y = y_deriv(wfc, si, shape_x); const DEEPWAVE_TYPE wfc_x = x_deriv(wfc, si); const DEEPWAVE_TYPE phizc_z = z_deriv(phizc, si, size_xy); const DEEPWAVE_TYPE phiyc_y = y_deriv(phiyc, si, shape_x); const DEEPWAVE_TYPE phixc_x = x_deriv(phixc, si); const DEEPWAVE_TYPE psic_z = z_deriv(psic, si, size_xy); const DEEPWAVE_TYPE psic_y = y_deriv(psic, si, shape_x); const DEEPWAVE_TYPE psic_x = x_deriv(psic, si); /* Update wavefield */ wfn[si] = 1 / (1 + dt * (sigmaz[z] + sigmay[y] + sigmax[x]) / 2) * (model[i] * lap + dt * dt * (phizc_z + phiyc_y + phixc_x - sigmaz[z] * sigmay[y] * sigmax[x] * psic[si]) + dt * (sigmaz[z] + sigmay[y] + sigmax[x]) * wfp[si] / 2 + (2 * wfc[si] - wfp[si]) - dt * dt * wfc[si] * (sigmax[x] * sigmay[y] + sigmay[y] * sigmaz[z] + sigmax[x] * sigmaz[z])); /* Update phi */ phizn[si] = phizc[si] - dt * sigmaz[z] * phizc[si] + model[i] / dt * (sigmay[y] 
+ sigmax[x]) * wfc_z + dt * sigmax[x] * sigmay[y] * psic_z; phiyn[si] = phiyc[si] - dt * sigmay[y] * phiyc[si] + model[i] / dt * (sigmaz[z] + sigmax[x]) * wfc_y + dt * sigmax[x] * sigmaz[z] * psic_y; phixn[si] = phixc[si] - dt * sigmax[x] * phixc[si] + model[i] / dt * (sigmaz[z] + sigmay[y]) * wfc_x + dt * sigmaz[z] * sigmay[y] * psic_x; /* Update psi */ psin[si] = psic[si] + dt * wfc[si]; } } } void propagate(DEEPWAVE_TYPE * const wfn, /* next wavefield */ DEEPWAVE_TYPE * const auxn, /* next auxiliary */ const DEEPWAVE_TYPE * const wfc, /* current wavefield */ const DEEPWAVE_TYPE * const wfp, /* previous wavefield */ const DEEPWAVE_TYPE * const auxc, /* current auxiliary */ const DEEPWAVE_TYPE * const sigma, const DEEPWAVE_TYPE * const model, const DEEPWAVE_TYPE * const fd1_d, /* 1st diff coeffs */ const DEEPWAVE_TYPE * const fd2_d, /* 2nd diff coeffs */ const ptrdiff_t * const shape, const ptrdiff_t * const pml_width, const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt) { const ptrdiff_t numel_shot = shape[0] * shape[1] * shape[2]; const ptrdiff_t size_xy = shape[1] * shape[2]; DEEPWAVE_TYPE * const phizn = auxn; const DEEPWAVE_TYPE * const phizc = auxc; const DEEPWAVE_TYPE * const sigmaz = sigma; DEEPWAVE_TYPE * const phiyn = auxn + num_shots * numel_shot; const DEEPWAVE_TYPE * const phiyc = auxc + num_shots * numel_shot; const DEEPWAVE_TYPE * const sigmay = sigma + shape[0]; DEEPWAVE_TYPE * const phixn = auxn + 2 * num_shots * numel_shot; DEEPWAVE_TYPE * const psin = auxn + 3 * num_shots * numel_shot; const DEEPWAVE_TYPE * const phixc = auxc + 2 * num_shots * numel_shot; const DEEPWAVE_TYPE * const psic = auxc + 3 * num_shots * numel_shot; const DEEPWAVE_TYPE * const sigmax = sigma + shape[0] + shape[1]; const dim3 dimBlock(32, 32, 1); const int gridx = (shape[2] - (2 * XPAD) + dimBlock.x - 1) / dimBlock.x; const int gridy = (shape[1] - (2 * YPAD) + dimBlock.y - 1) / dimBlock.y; const int gridz = (num_shots * (shape[0] - (2 * ZPAD)) + dimBlock.z - 1) / 
dimBlock.z; const dim3 dimGrid(gridx, gridy, gridz); propagate_kernel<<<dimGrid, dimBlock>>>( wfn, phizn, phiyn, phixn, psin, wfc, wfp, phizc, sigmaz, phiyc, sigmay, phixc, sigmax, psic, model, shape[0], shape[1], shape[2], numel_shot, size_xy, num_shots, pml_width[0], pml_width[1], pml_width[2], pml_width[3], pml_width[4], pml_width[5], dt); gpuErrchk(cudaPeekAtLastError()); } void __global__ imaging_condition_kernel( DEEPWAVE_TYPE * const model_grad, const DEEPWAVE_TYPE * const current_wavefield, const DEEPWAVE_TYPE * const current_saved_wavefield, const DEEPWAVE_TYPE * const current_saved_wavefield_t, const DEEPWAVE_TYPE * const current_saved_wavefield_tt, const DEEPWAVE_TYPE * const sigmaz, const DEEPWAVE_TYPE * const sigmay, const DEEPWAVE_TYPE * const sigmax, const ptrdiff_t shape_z, const ptrdiff_t shape_y, const ptrdiff_t shape_x, const ptrdiff_t numel_shot, const ptrdiff_t size_xy, const ptrdiff_t num_shots) { const ptrdiff_t threadz = blockIdx.z * blockDim.z + threadIdx.z; const ptrdiff_t shot = threadz / (shape_z - ZPAD - ZPAD + 1); const ptrdiff_t z = threadz % (shape_z - ZPAD - ZPAD + 1) + ZPAD; const ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y + YPAD; const ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x + XPAD; if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD) && (x < shape_x - XPAD)) { const ptrdiff_t i = z * size_xy + y * shape_x + x; const ptrdiff_t si = shot * numel_shot + i; /* NOTE: There should be an additional term here (involving spatial * derivative of phi, sigma, and psi), but it is neglected due to * the additional computational cost it would cause. 
*/ atomicAdd(model_grad + i, current_wavefield[si] * (current_saved_wavefield_tt[si] + (sigmaz[z] + sigmay[y] + sigmax[x]) * current_saved_wavefield_t[si] + (sigmax[x] * sigmay[y] + sigmay[y] * sigmaz[z] + sigmax[x] * sigmaz[z]) * current_saved_wavefield[si])); } } void imaging_condition( DEEPWAVE_TYPE * const model_grad, const DEEPWAVE_TYPE * const current_wavefield, const DEEPWAVE_TYPE * const current_saved_wavefield, const DEEPWAVE_TYPE * const current_saved_wavefield_t, const DEEPWAVE_TYPE * const current_saved_wavefield_tt, const DEEPWAVE_TYPE * const sigma, const ptrdiff_t * const shape, const ptrdiff_t * const pml_width, const ptrdiff_t num_shots) { if (model_grad == NULL) return; /* Not doing model inversion */ const dim3 dimBlock(32, 32, 1); const int gridx = (shape[2] - (2 * XPAD) + dimBlock.x - 1) / dimBlock.x; const int gridy = (shape[1] - (2 * YPAD) + dimBlock.y - 1) / dimBlock.y; const int gridz = (num_shots * (shape[0] - (2 * ZPAD)) + dimBlock.z - 1) / dimBlock.z; const dim3 dimGrid(gridx, gridy, gridz); const DEEPWAVE_TYPE * const sigmaz = sigma; const DEEPWAVE_TYPE * const sigmay = sigma + shape[0]; const DEEPWAVE_TYPE * const sigmax = sigma + shape[0] + shape[1]; imaging_condition_kernel<<<dimGrid, dimBlock>>>( model_grad, current_wavefield, current_saved_wavefield, current_saved_wavefield_t, current_saved_wavefield_tt, sigmaz, sigmay, sigmax, shape[0], shape[1], shape[2], shape[0] * shape[1] * shape[2], shape[1] * shape[2], num_shots); gpuErrchk(cudaPeekAtLastError()); } void __global__ add_scattering_kernel( DEEPWAVE_TYPE * const next_scattered_wavefield, const DEEPWAVE_TYPE * const next_wavefield, const DEEPWAVE_TYPE * const current_wavefield, const DEEPWAVE_TYPE * const previous_wavefield, const DEEPWAVE_TYPE * const scatter, const ptrdiff_t shape_z, const ptrdiff_t shape_y, const ptrdiff_t shape_x, const ptrdiff_t numel_shot, const ptrdiff_t size_xy, const ptrdiff_t num_shots) { const ptrdiff_t threadz = blockIdx.z * blockDim.z + threadIdx.z; 
const ptrdiff_t shot = threadz / (shape_z - ZPAD - ZPAD + 1);
  const ptrdiff_t z = threadz % (shape_z - ZPAD - ZPAD + 1) + ZPAD;
  const ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y + YPAD;
  const ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x + XPAD;
  /* Bounds guard: grid is over-provisioned, so reject out-of-range cells. */
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)
      && (x < shape_x - XPAD))
  {
    const ptrdiff_t i = z * size_xy + y * shape_x + x;
    const ptrdiff_t si = shot * numel_shot + i;
    /* Central second difference in time of the background wavefield. */
    const DEEPWAVE_TYPE current_wavefield_tt =
        (next_wavefield[si] - 2 * current_wavefield[si]
         + previous_wavefield[si]); /* no dt^2 because of cancellation */
    next_scattered_wavefield[si] += current_wavefield_tt * scatter[i];
  }
}

/* Host launcher for add_scattering_kernel over the interior of each shot's
 * wavefield. scatter is shared by all shots (indexed without the shot
 * offset in the kernel). */
void add_scattering(
    DEEPWAVE_TYPE * const next_scattered_wavefield,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const DEEPWAVE_TYPE * const scatter,
    const ptrdiff_t * const shape,
    const ptrdiff_t num_shots)
{
  const dim3 dimBlock(32, 32, 1);
  const int gridx = (shape[2] - (2 * XPAD) + dimBlock.x - 1) / dimBlock.x;
  const int gridy = (shape[1] - (2 * YPAD) + dimBlock.y - 1) / dimBlock.y;
  const int gridz =
      (num_shots * (shape[0] - (2 * ZPAD)) + dimBlock.z - 1) / dimBlock.z;
  const dim3 dimGrid(gridx, gridy, gridz);
  add_scattering_kernel<<<dimGrid, dimBlock>>>(
      next_scattered_wavefield, next_wavefield, current_wavefield,
      previous_wavefield, scatter, shape[0], shape[1], shape[2],
      shape[0] * shape[1] * shape[2], shape[1] * shape[2], num_shots);
  gpuErrchk(cudaPeekAtLastError());
}

/* Kernel: stores finite-difference estimates of the first (central, /2dt)
 * and second (/dt^2) time derivatives of the wavefield, one thread per
 * interior cell per shot. */
void __global__ save_wavefields_kernel(
    DEEPWAVE_TYPE * const current_saved_wavefield_t,
    DEEPWAVE_TYPE * const current_saved_wavefield_tt,
    const DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const current_wavefield,
    const DEEPWAVE_TYPE * const previous_wavefield,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t shape_x, const ptrdiff_t numel_shot,
    const ptrdiff_t size_xy, const ptrdiff_t num_shots, const
DEEPWAVE_TYPE dt)
{
  /* z thread dimension encodes (shot, z) jointly, as in the other kernels. */
  const ptrdiff_t threadz = blockIdx.z * blockDim.z + threadIdx.z;
  const ptrdiff_t shot = threadz / (shape_z - ZPAD - ZPAD + 1);
  const ptrdiff_t z = threadz % (shape_z - ZPAD - ZPAD + 1) + ZPAD;
  const ptrdiff_t y = blockIdx.y * blockDim.y + threadIdx.y + YPAD;
  const ptrdiff_t x = blockIdx.x * blockDim.x + threadIdx.x + XPAD;
  if ((shot < num_shots) && (z < shape_z - ZPAD) && (y < shape_y - YPAD)
      && (x < shape_x - XPAD))
  {
    const ptrdiff_t i = z * size_xy + y * shape_x + x;
    const ptrdiff_t si = shot * numel_shot + i;
    /* Central first difference: (u(t+dt) - u(t-dt)) / (2 dt). */
    current_saved_wavefield_t[si] =
        (next_wavefield[si] - previous_wavefield[si]) / 2 / dt;
    /* Central second difference: (u(t+dt) - 2u(t) + u(t-dt)) / dt^2. */
    current_saved_wavefield_tt[si] =
        (next_wavefield[si] - 2 * current_wavefield[si]
         + previous_wavefield[si]) / dt / dt;
  }
}

/* Host wrapper: when the save strategy is STRATEGY_COPY, snapshots the
 * current wavefield (device-to-device copy of all shots) and computes its
 * time derivatives via save_wavefields_kernel. For any other strategy this
 * is a no-op. */
void save_wavefields(DEEPWAVE_TYPE * const current_saved_wavefield,
                     DEEPWAVE_TYPE * const current_saved_wavefield_t,
                     DEEPWAVE_TYPE * const current_saved_wavefield_tt,
                     const DEEPWAVE_TYPE * const next_wavefield,
                     const DEEPWAVE_TYPE * const current_wavefield,
                     const DEEPWAVE_TYPE * const previous_wavefield,
                     const ptrdiff_t * const shape,
                     const ptrdiff_t num_shots, const DEEPWAVE_TYPE dt,
                     const enum wavefield_save_strategy save_strategy)
{
  if (save_strategy == STRATEGY_COPY)
  {
    gpuErrchk(
        cudaMemcpy(current_saved_wavefield, current_wavefield,
                   num_shots * shape[0] * shape[1] * shape[2]
                       * sizeof(DEEPWAVE_TYPE),
                   cudaMemcpyDeviceToDevice));
    const dim3 dimBlock(32, 32, 1);
    const int gridx = (shape[2] - (2 * XPAD) + dimBlock.x - 1) / dimBlock.x;
    const int gridy = (shape[1] - (2 * YPAD) + dimBlock.y - 1) / dimBlock.y;
    const int gridz =
        (num_shots * (shape[0] - (2 * ZPAD)) + dimBlock.z - 1) / dimBlock.z;
    const dim3 dimGrid(gridx, gridy, gridz);
    save_wavefields_kernel<<<dimGrid, dimBlock>>>(
        current_saved_wavefield_t, current_saved_wavefield_tt,
        next_wavefield, current_wavefield, previous_wavefield,
        shape[0], shape[1], shape[2], shape[0] * shape[1] * shape[2],
        shape[1] * shape[2], num_shots, dt);
    gpuErrchk(cudaPeekAtLastError());
  }
}

static
inline __device__ ptrdiff_t location_index(const ptrdiff_t * const arr,
                                           const ptrdiff_t shape_y,
                                           const ptrdiff_t shape_x,
                                           const ptrdiff_t index)
{
  /* arr holds (z, y, x) triples; convert triple #index into a flat offset
   * within one shot's 3D volume. */
  const ptrdiff_t z = arr[index * 3];
  const ptrdiff_t y = arr[index * 3 + 1];
  const ptrdiff_t x = arr[index * 3 + 2];
  return z * shape_y * shape_x + y * shape_x + x;
}

/* 3D Laplacian using the 4th-order finite-difference coefficients stored in
 * constant memory (fd2[0] is the center weight; fd2[1..2] z, fd2[3..4] y,
 * fd2[5..6] x neighbor weights). Requires si to be at least 2 cells from
 * every face. */
static inline __device__ DEEPWAVE_TYPE laplacian_3d(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x, const ptrdiff_t size_xy)
{
  return fd2[0] * arr[si]
      + fd2[1] * (arr[si + size_xy] + arr[si - size_xy])
      + fd2[2] * (arr[si + 2 * size_xy] + arr[si - 2 * size_xy]) +
      /* NOTE(review): the extra '+' below is a harmless unary plus
       * (likely a typo); it does not change the result. */
      +fd2[3] * (arr[si + size_x] + arr[si - size_x])
      + fd2[4] * (arr[si + 2 * size_x] + arr[si - 2 * size_x])
      + fd2[5] * (arr[si + 1] + arr[si - 1])
      + fd2[6] * (arr[si + 2] + arr[si - 2]);
}
#else
#error "Must specify the dimension, e.g. -D DEEPWAVE_DIM=1"
#endif /* DEEPWAVE_DIM */

/* Copies the finite-difference coefficient tables into the fd1/fd2
 * __constant__ symbols; must run before any propagation kernel. */
void setup(const DEEPWAVE_TYPE * const fd1_d,
           const DEEPWAVE_TYPE * const fd2_d)
{
  gpuErrchk(cudaMemcpyToSymbol(fd1, fd1_d,
                               2 * DEEPWAVE_DIM * sizeof(DEEPWAVE_TYPE)));
  gpuErrchk(cudaMemcpyToSymbol(fd2, fd2_d,
                               (2 * DEEPWAVE_DIM + 1)
                                   * sizeof(DEEPWAVE_TYPE)));
}

/* Kernel: injects source amplitudes (scaled by the model value at the
 * source cell) into the wavefield; one thread per (shot, source).
 * atomicAdd because several sources may share a cell. */
void __global__ add_sources_kernel(
    DEEPWAVE_TYPE * const next_wavefield,
    const DEEPWAVE_TYPE * const model,
    const DEEPWAVE_TYPE * const source_amplitudes,
    const ptrdiff_t * const source_locations,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t shape_x, const ptrdiff_t num_shots,
    const ptrdiff_t num_sources_per_shot)
{
  ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  ptrdiff_t source = blockIdx.x * blockDim.x + threadIdx.x;
  if ((shot < num_shots) && (source < num_sources_per_shot))
  {
    ptrdiff_t s = shot * num_sources_per_shot + source;
    ptrdiff_t i = location_index(source_locations, shape_y, shape_x, s);
    ptrdiff_t si = shot * shape_z * shape_y * shape_x + i;
    atomicAdd(next_wavefield + si, source_amplitudes[s] * model[i]);
  }
}

/* Host launcher for add_sources_kernel: x dimension over sources, y over
 * shots. */
void add_sources(DEEPWAVE_TYPE * const next_wavefield,
                 const DEEPWAVE_TYPE * const model, const
DEEPWAVE_TYPE * const source_amplitudes,
                 const ptrdiff_t * const source_locations,
                 const ptrdiff_t * const shape,
                 const ptrdiff_t num_shots,
                 const ptrdiff_t num_sources_per_shot)
{
  dim3 dimBlock(32, 1, 1);
  int gridx = (num_sources_per_shot + dimBlock.x - 1) / dimBlock.x;
  int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
  int gridz = 1;
  dim3 dimGrid(gridx, gridy, gridz);
  add_sources_kernel<<<dimGrid, dimBlock>>>(
      next_wavefield, model, source_amplitudes, source_locations,
      shape[0], shape[1], shape[2], num_shots, num_sources_per_shot);
  gpuErrchk(cudaPeekAtLastError());
}

/* Kernel: samples the wavefield at each receiver location; one thread per
 * (shot, receiver). Plain store — each (shot, receiver) output slot is
 * written by exactly one thread. */
void __global__ record_receivers_kernel(
    DEEPWAVE_TYPE * const receiver_amplitudes,
    const DEEPWAVE_TYPE * const current_wavefield,
    const ptrdiff_t * const receiver_locations,
    const ptrdiff_t shape_z, const ptrdiff_t shape_y,
    const ptrdiff_t shape_x, const ptrdiff_t num_shots,
    const ptrdiff_t num_receivers_per_shot)
{
  ptrdiff_t shot = blockIdx.y * blockDim.y + threadIdx.y;
  ptrdiff_t receiver = blockIdx.x * blockDim.x + threadIdx.x;
  if ((shot < num_shots) && (receiver < num_receivers_per_shot))
  {
    ptrdiff_t r = shot * num_receivers_per_shot + receiver;
    ptrdiff_t si = shot * shape_z * shape_y * shape_x
        + location_index(receiver_locations, shape_y, shape_x, r);
    receiver_amplitudes[r] = current_wavefield[si];
  }
}

/* Host launcher for record_receivers_kernel: x dimension over receivers,
 * y over shots. No-op when receiver_amplitudes is NULL. */
void record_receivers(DEEPWAVE_TYPE * const receiver_amplitudes,
                      const DEEPWAVE_TYPE * const current_wavefield,
                      const ptrdiff_t * const receiver_locations,
                      const ptrdiff_t * const shape,
                      const ptrdiff_t num_shots,
                      const ptrdiff_t num_receivers_per_shot)
{
  if (receiver_amplitudes == NULL) return; /* no source inversion */
  dim3 dimBlock(32, 1, 1);
  int gridx = (num_receivers_per_shot + dimBlock.x - 1) / dimBlock.x;
  int gridy = (num_shots + dimBlock.y - 1) / dimBlock.y;
  int gridz = 1;
  dim3 dimGrid(gridx, gridy, gridz);
  record_receivers_kernel<<<dimGrid, dimBlock>>>(
      receiver_amplitudes, current_wavefield, receiver_locations,
      shape[0], shape[1], shape[2], num_shots, num_receivers_per_shot);
gpuErrchk(cudaPeekAtLastError());
}

/* Kernel: element-wise in-place scaling of the model gradient (e.g. by a
 * precomputed factor per cell); one thread per cell. */
void __global__ model_grad_scaling_kernel(
    DEEPWAVE_TYPE * const model_grad,
    const DEEPWAVE_TYPE * const scaling,
    const ptrdiff_t numel_shot)
{
  ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numel_shot)
  {
    model_grad[i] *= scaling[i];
  }
}

/* Host launcher: scales every cell of the (single-shot-sized) model
 * gradient by the per-cell scaling array. No-op when model_grad is NULL.
 * pml_width is currently unused here. */
void model_grad_scaling(DEEPWAVE_TYPE * const model_grad,
                        const DEEPWAVE_TYPE * const scaling,
                        const ptrdiff_t * const shape,
                        const ptrdiff_t * const pml_width)
{
  if (model_grad == NULL) return; /* Not doing model inversion */
  const ptrdiff_t numel_shot = shape[0] * shape[1] * shape[2];
  dim3 dimBlock(32, 1, 1);
  int gridx = (numel_shot + dimBlock.x - 1) / dimBlock.x;
  int gridy = 1;
  int gridz = 1;
  dim3 dimGrid(gridx, gridy, gridz);
  model_grad_scaling_kernel<<<dimGrid, dimBlock>>>(model_grad, scaling,
                                                   numel_shot);
  gpuErrchk(cudaPeekAtLastError());
}

/* 4th-order central first derivative in z (stride size_xy), using the fd1
 * constant-memory coefficients. Requires 2 cells of padding. */
static inline __device__ DEEPWAVE_TYPE z_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_xy)
{
  return fd1[0] * (arr[si + size_xy] - arr[si - size_xy])
      + fd1[1] * (arr[si + 2 * size_xy] - arr[si - 2 * size_xy]);
}

/* Same as z_deriv but along y (stride size_x). */
static inline __device__ DEEPWAVE_TYPE y_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si,
    const ptrdiff_t size_x)
{
  return fd1[0] * (arr[si + size_x] - arr[si - size_x])
      + fd1[1] * (arr[si + 2 * size_x] - arr[si - 2 * size_x]);
}

/* Same as z_deriv but along x (unit stride). */
static inline __device__ DEEPWAVE_TYPE x_deriv(
    const DEEPWAVE_TYPE * const arr, const ptrdiff_t si)
{
  return fd1[0] * (arr[si + 1] - arr[si - 1])
      + fd1[1] * (arr[si + 2] - arr[si - 2]);
}
the_stack
#include <thrust/functional.h>

// Linear operator representing a symmetric 2x2-style block concatenation
// K = [D, C; C^T, ...] built from a list of diagonal blocks (D_list) and a
// list of column blocks (C_list), without materializing the full matrix.
// The constructor builds `indices`, a map from each nonzero slot of the
// combined operator back into the concatenated (D entries, C entries)
// storage, using thrust scans/scatters.
template<typename MatrixType>
class block_matrix
    : public cusp::linear_operator<typename MatrixType::value_type,
                                   typename MatrixType::memory_space>
{
    private:

    typedef typename MatrixType::index_type   IndexType;
    typedef typename MatrixType::value_type   ValueType;
    typedef typename MatrixType::memory_space MemorySpace;
    typedef cusp::linear_operator<ValueType,MemorySpace> Parent;
    typedef std::vector<MatrixType> MatrixList;
    // Functor adding a constant shift to row/column indices of a block so
    // the block's entries are expressed in combined-operator coordinates.
    typedef cusp::detail::plus_value<IndexType> ShiftOp;
    typedef typename MatrixType::row_indices_array_type::const_iterator    RowIterator;
    typedef typename MatrixType::column_indices_array_type::const_iterator ColumnIterator;
    typedef thrust::transform_iterator<ShiftOp,RowIterator>    RowTransformIterator;
    typedef thrust::transform_iterator<ShiftOp,ColumnIterator> ColumnTransformIterator;
    typedef cusp::array1d_view<RowTransformIterator>    RowView;
    typedef cusp::array1d_view<ColumnTransformIterator> ColumnView;
    typedef typename MatrixType::values_array_type::const_view ValueView;
    // A COO view of a block whose indices are lazily shifted into place.
    typedef cusp::coo_matrix_view<RowView,ColumnView,ValueView> ShiftedViewType;

    const MatrixList& C_list;
    const MatrixList& D_list;
    std::vector<ShiftedViewType> shifted_C_list;
    std::vector<ShiftedViewType> shifted_D_list;
    // Per-nonzero map into the concatenated entry storage.
    // NOTE(review): element type is unsigned int but it is filled with -1
    // below (wraps to UINT_MAX) — presumably an "unset" sentinel; confirm.
    cusp::array1d<unsigned int, MemorySpace> indices;

    public:

    // Builds the combined operator's shape and the nonzero index map.
    // Assumes D_list has one more block than C_list (the trailing diagonal
    // block), per the num_cols computation below.
    block_matrix(MatrixList& D_list, MatrixList& C_list)
        : Parent(), C_list(C_list), D_list(D_list)
    {
        size_t num_rows = 0;
        size_t num_C_entries = 0;
        size_t num_D_entries = 0;

        // Compute number of row and entries in diagonal blocks
        for(size_t i = 0; i < D_list.size(); i++)
        {
            num_rows += D_list[i].num_rows;
            num_D_entries += D_list[i].num_entries;
        }

        // Add entries from block column
        for(size_t i = 0; i < C_list.size(); i++)
            num_C_entries += C_list[i].num_entries;

        // Columns covered by the diagonal blocks excluding the last one.
        size_t num_cols = num_rows - D_list.back().num_rows;
        // Each C entry appears twice (as C and as C^T).
        size_t num_entries = num_D_entries + (2 * num_C_entries);

        // Resize parent to correct size of concatenated matrices
        Parent::resize(num_rows, num_rows, num_entries);

        // resize indices
// (cont.) ...to total number of nonzeros
        indices.resize(num_entries, -1);

        size_t row = 0;       // running row offset of the current block
        size_t col = 0;       // running column offset of the current block
        size_t C_offset = 0;  // C entries consumed so far
        size_t D_offset = 0;  // D entries consumed so far

        for(size_t i = 0; i < C_list.size(); i++)
        {
            // Shifted view of C block i: rows shifted by `row`, columns
            // shifted past all diagonal-block columns (num_cols).
            const MatrixType& A = C_list[i];
            ShiftedViewType S_C(row + A.num_rows, num_cols + A.num_cols, A.num_entries,
                                cusp::make_array1d_view(thrust::make_transform_iterator(A.row_indices.cbegin(), ShiftOp(row)),
                                                        thrust::make_transform_iterator(A.row_indices.cbegin(), ShiftOp(row)) + A.num_entries),
                                cusp::make_array1d_view(thrust::make_transform_iterator(A.column_indices.cbegin(), ShiftOp(num_cols)),
                                                        thrust::make_transform_iterator(A.column_indices.cbegin(), ShiftOp(num_cols)) + A.num_entries),
                                cusp::make_array1d_view(A.values));
            shifted_C_list.push_back(S_C);

            // Shifted view of diagonal block i (rows and columns shifted by
            // the running offsets).
            // NOTE(review): S_D is built but never pushed to shifted_D_list
            // — looks unfinished; confirm whether it should be stored.
            const MatrixType& B = D_list[i];
            ShiftedViewType S_D(row + B.num_rows, row + B.num_cols, B.num_entries,
                                cusp::make_array1d_view(thrust::make_transform_iterator(B.row_indices.cbegin(), ShiftOp(row)),
                                                        thrust::make_transform_iterator(B.row_indices.cbegin(), ShiftOp(row)) + B.num_entries),
                                cusp::make_array1d_view(thrust::make_transform_iterator(B.column_indices.cbegin(), ShiftOp(col)),
                                                        thrust::make_transform_iterator(B.column_indices.cbegin(), ShiftOp(col)) + B.num_entries),
                                cusp::make_array1d_view(B.values));

            // compute row lengths of C matrix (assumes sorted COO rows)
            cusp::array1d<IndexType,MemorySpace> C_row_lengths(C_list[i].num_rows + 1, 0);
            thrust::reduce_by_key(C_list[i].row_indices.begin(), C_list[i].row_indices.end(),
                                  thrust::constant_iterator<IndexType>(1),
                                  thrust::make_discard_iterator(),
                                  C_row_lengths.begin());

            // compute row lengths of D matrix
            cusp::array1d<IndexType,MemorySpace> D_row_lengths(D_list[i].num_rows + 1, 0);
            thrust::reduce_by_key(D_list[i].row_indices.begin(), D_list[i].row_indices.end(),
                                  thrust::constant_iterator<IndexType>(1),
                                  thrust::make_discard_iterator(),
                                  D_row_lengths.begin());

            // compute combined operator offsets (per-row D + C lengths)
            cusp::array1d<IndexType,MemorySpace> K_row_offsets(D_list[i].num_rows + 1, 0);
            thrust::transform(C_row_lengths.begin(),
C_row_lengths.end(), D_row_lengths.begin(),
                              K_row_offsets.begin(), thrust::plus<IndexType>());
            thrust::exclusive_scan(K_row_offsets.begin(), K_row_offsets.end(), K_row_offsets.begin(), 0);

            // transform D_row_lengths to D_row_offsets for scattering into D_map
            thrust::exclusive_scan(D_row_lengths.begin(), D_row_lengths.end(), D_row_lengths.begin(), 0);

            // allocate array of ones for mapping D nonzeros to K operator
            cusp::array1d<IndexType,MemorySpace> D_map(D_list[i].num_entries, 1);
            // scatter starting offsets with respect to K operator into D_map
            thrust::scatter(K_row_offsets.begin(), K_row_offsets.end(), D_row_lengths.begin(), D_map.begin());
            // run segmented scan over indices to construct running offsets of D matrix nonzeros
            thrust::inclusive_scan_by_key(D_list[i].row_indices.begin(),
                                          D_list[i].row_indices.end(),
                                          D_map.begin(),
                                          D_map.begin());
            // scatter final index offsets into indices array
            thrust::scatter(thrust::counting_iterator<IndexType>(D_offset),
                            thrust::counting_iterator<IndexType>(D_offset + D_list[i].num_entries),
                            D_map.begin(),
                            indices.begin() + C_offset + D_offset);

            // transform C_row_lengths to C_row_offsets for scattering into C_map
            thrust::exclusive_scan(C_row_lengths.begin(), C_row_lengths.end(), C_row_lengths.begin(), 0);

            // shift K_row_offsets by D_row_offsets
            // (recompute D row lengths — they were overwritten by the scan
            // above)
            thrust::reduce_by_key(D_list[i].row_indices.begin(), D_list[i].row_indices.end(),
                                  thrust::constant_iterator<IndexType>(1),
                                  thrust::make_discard_iterator(),
                                  D_row_lengths.begin());
            thrust::transform(D_row_lengths.begin(), D_row_lengths.end(),
                              K_row_offsets.begin(), K_row_offsets.begin(),
                              thrust::plus<IndexType>());

            // allocate array of ones for mapping C nonzeros to K operator
            cusp::array1d<IndexType,MemorySpace> C_map(C_list[i].num_entries, 1);
            // scatter starting offsets with respect to K operator into C_map
            thrust::scatter(K_row_offsets.begin(), K_row_offsets.end(), C_row_lengths.begin(), C_map.begin());
            // run segmented scan over indices to construct running offsets of D matrix
// (cont.) ...nonzeros
            thrust::inclusive_scan_by_key(C_list[i].row_indices.begin(),
                                          C_list[i].row_indices.end(),
                                          C_map.begin(),
                                          C_map.begin());
            // scatter final index offsets into indices array
            thrust::scatter(thrust::counting_iterator<IndexType>(num_D_entries + C_offset),
                            thrust::counting_iterator<IndexType>(C_offset + num_D_entries + C_list[i].num_entries),
                            C_map.begin(),
                            indices.begin() + C_offset + D_offset);

            // increment C and D matrix starting offsets
            row += D_list[i].num_rows;
            col += D_list[i].num_cols;
            C_offset += C_list[i].num_entries;
            D_offset += D_list[i].num_entries;
        }

        {
            // Build B^T: the transpose of the stacked C blocks, by sorting
            // the joined column indices.
            // NOTE(review): join iterators below hard-code exactly two C
            // blocks (shifted_C_list[0] and [1]); this breaks for other
            // C_list sizes — confirm intent.
            cusp::counting_array<IndexType> C_indices(num_C_entries);
            auto B_join_indices =
                cusp::make_array1d_view(cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                 shifted_C_list[0].column_indices.begin(), shifted_C_list[1].column_indices.begin(),
                                                                 C_indices.begin()),
                                        cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                 shifted_C_list[0].column_indices.begin(), shifted_C_list[1].column_indices.begin(),
                                                                 C_indices.begin()) + num_C_entries);

            // Permutation that sorts C entries by column (i.e. B^T rows).
            cusp::array1d<IndexType,MemorySpace> C_t_indices(num_C_entries);
            thrust::sequence(C_t_indices.begin(), C_t_indices.end());

            cusp::array1d<IndexType,MemorySpace> B_column_indices(B_join_indices);
            thrust::stable_sort_by_key(B_column_indices.begin(), B_column_indices.end(), C_t_indices.begin());

            // View of B^T: rows are the (sorted) C columns, columns the C
            // rows, values gathered through the sort permutation.
            auto B_t = cusp::make_coo_matrix_view(num_rows, num_cols, num_C_entries,
                                                  cusp::make_array1d_view(cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                                                   shifted_C_list[0].column_indices.begin(), shifted_C_list[1].column_indices.begin(),
                                                                                                   C_t_indices.begin()),
                                                                          cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                                                   shifted_C_list[0].column_indices.begin(), shifted_C_list[1].column_indices.begin(),
                                                                                                   C_t_indices.begin()) + num_C_entries),
                                                  cusp::make_array1d_view(cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
shifted_C_list[0].row_indices.begin(), shifted_C_list[1].row_indices.begin(),
                                                                                                   C_t_indices.begin()),
                                                                          cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                                                   shifted_C_list[0].row_indices.begin(), shifted_C_list[1].row_indices.begin(),
                                                                                                   C_t_indices.begin()) + num_C_entries),
                                                  cusp::make_array1d_view(cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                                                   shifted_C_list[0].values.begin(), shifted_C_list[1].values.begin(),
                                                                                                   C_t_indices.begin()),
                                                                          cusp::make_join_iterator(shifted_C_list[0].num_entries, shifted_C_list[1].num_entries,
                                                                                                   shifted_C_list[0].values.begin(), shifted_C_list[1].values.begin(),
                                                                                                   C_t_indices.begin()) + num_C_entries));

            cusp::array1d<IndexType,MemorySpace> B_t_row_offsets(num_rows + 1);
            cusp::indices_to_offsets(B_t.row_indices, B_t_row_offsets);

            // Trailing diagonal block (shares the last block row with B^T).
            const MatrixType& E = D_list.back();

            // compute row lengths of D matrix
            // (`row` here is the row offset of the last block row)
            cusp::array1d<IndexType,MemorySpace> E_row_lengths(num_rows + 1, 0);
            thrust::reduce_by_key(E.row_indices.begin(), E.row_indices.end(),
                                  thrust::constant_iterator<IndexType>(1),
                                  thrust::make_discard_iterator(),
                                  E_row_lengths.begin() + row);

            // compute row lengths of D matrix
            cusp::array1d<IndexType,MemorySpace> B_t_row_lengths(num_rows + 1, 0);
            thrust::reduce_by_key(B_t.row_indices.begin(), B_t.row_indices.end(),
                                  thrust::constant_iterator<IndexType>(1),
                                  thrust::make_discard_iterator(),
                                  B_t_row_lengths.begin() + row);

            // compute combined operator offsets
            cusp::array1d<IndexType,MemorySpace> K_row_offsets(num_rows + 1, 0);
            thrust::transform(E_row_lengths.begin(), E_row_lengths.end(),
                              B_t_row_lengths.begin(),
                              K_row_offsets.begin(), thrust::plus<IndexType>());
            thrust::exclusive_scan(K_row_offsets.begin(), K_row_offsets.end(), K_row_offsets.begin(), 0);

            // transform D_row_lengths to D_row_offsets for scattering into D_map
            thrust::exclusive_scan(B_t_row_lengths.begin(), B_t_row_lengths.end(), B_t_row_lengths.begin(), 0);

            // allocate array of ones for mapping D nonzeros to K operator
cusp::array1d<IndexType,MemorySpace> B_t_map(B_t.num_entries, 1);
            // scatter starting offsets with respect to K operator into D_map
            thrust::scatter(K_row_offsets.begin(), K_row_offsets.end(), B_t_row_lengths.begin(), B_t_map.begin());
            // run segmented scan over indices to construct running offsets of D matrix nonzeros
            thrust::inclusive_scan_by_key(B_t.row_indices.begin(),
                                          B_t.row_indices.end(),
                                          B_t_map.begin(),
                                          B_t_map.begin());

            // NOTE(review): leftover debug output — consider removing
            // before production use.
            cusp::print(K_row_offsets);
            cusp::print(B_t_row_lengths);
            cusp::print(B_t_map);

            // scatter final index offsets into indices array
            thrust::scatter(thrust::counting_iterator<IndexType>(C_offset + D_offset),
                            thrust::counting_iterator<IndexType>(C_offset + D_offset + B_t.num_entries),
                            B_t_map.begin(),
                            indices.begin() + C_offset + D_offset);

            // shift K_row_offsets by D_row_offsets
            // (the remainder of the last-block-row mapping is commented
            // out — apparently unfinished)
            /* cusp::blas::fill(E_row_lengths, 0); */
            /* thrust::reduce_by_key(E.row_indices.begin(), E.row_indices.end(), */
            /*                       thrust::constant_iterator<IndexType>(1), */
            /*                       thrust::make_discard_iterator(), */
            /*                       E_row_lengths.begin()); */
            /* thrust::transform(B_t_row_lengths.begin(), B_t_row_lengths.end(), */
            /*                   K_row_offsets.begin(), K_row_offsets.begin(), */
            /*                   thrust::plus<IndexType>()); */
            /* */
            /* // transform C_row_lengths to C_row_offsets for scattering into C_map */
            /* thrust::exclusive_scan(E_row_lengths.begin(), E_row_lengths.end(), E_row_lengths.begin(), 0); */
            /* */
            /* // allocate array of ones for mapping C nonzeros to K operator */
            /* cusp::array1d<IndexType,MemorySpace> C_map(E.num_entries, 1); */
            /* // scatter starting offsets with respect to K operator into C_map */
            /* thrust::scatter(K_row_offsets.begin(), K_row_offsets.end(), E_row_lengths.begin(), C_map.begin()); */
            /* // run segmented scan over indices to construct running offsets of D matrix nonzeros */
            /* thrust::inclusive_scan_by_key(E.row_indices.begin(), */
            /*                               E.row_indices.end(), */
            /*                               C_map.begin(), */
            /*                               C_map.begin()); */
            /* // scatter final index offsets into indices array */
            /*
thrust::scatter(thrust::counting_iterator<IndexType>(C_offset + D_offset), */
            /* thrust::counting_iterator<IndexType>(C_offset + D_offset + E.num_entries), */
            /* C_map.begin(), */
            /* indices.begin() + C_offset + D_offset + E.num_entries); */
        }
    }
};

// Demo driver: builds a 5x5 Poisson operator, uses three copies as
// diagonal blocks and two copies as column blocks, and constructs the
// concatenated block operator, printing the shapes before and after.
int main(void)
{
    typedef cusp::device_memory MemorySpace;
    typedef cusp::coo_matrix<int,float,MemorySpace> MatrixType;

    MatrixType A;
    cusp::gallery::poisson5pt(A, 5, 5);

    std::cout << "Generated base operator with shape ("
              << A.num_rows << "," << A.num_cols << ") and "
              << A.num_entries << " entries" << "\n\n";

    std::vector<MatrixType> D_list;
    std::vector<MatrixType> C_list;

    // Three diagonal blocks, two column blocks (D_list one longer than
    // C_list, as block_matrix expects).
    D_list.push_back(A);
    D_list.push_back(A);
    D_list.push_back(A);
    C_list.push_back(A);
    C_list.push_back(A);

    auto K = block_matrix<MatrixType>(D_list, C_list);

    std::cout << "Generated concatenated operator with shape ("
              << K.num_rows << "," << K.num_cols << ") and "
              << K.num_entries << " entries" << "\n\n";

    return 0;
}
the_stack
#include <iostream>
#include <cstdlib>
#include <vector>

// for each state
// start_weight
// end_weight
// Transition: float weight, int input_label, int state
// alpha_transition_index
// beta_transition_index

// Abort-on-error wrapper for CUDA runtime calls.
#define CHECK_CUDA(call) \
{ \
  const cudaError_t error = call; \
  if (error != cudaSuccess) \
  { \
    printf("Error: %s:%d, ", __FILE__, __LINE__); \
    printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
    exit(1); \
  } \
}

// Number of replicated accumulator slots per logit, used to spread
// atomic contention in beta_kernel/copy_grad.
#define ATOMIC_CONST 32
#define CU_BLOCK_DIM 1024

// Numerically stable log(exp(a) + exp(b)); -inf acts as the identity.
__host__ __device__ inline float log_plus(float a, float b) {
  if (a == -float(INFINITY)) return b;
  if (b == -float(INFINITY)) return a;
  float m = a > b ? a : b;
  return log1pf(expf(-fabs(a - b))) + m;
}

// Atomic log-sum-exp accumulation into *addr_f, implemented as a CAS loop
// on the float's bit pattern. Returns the value observed before the final
// successful update.
__device__ float atomic_log_plus(float *addr_f, float value) {
  int *addr = (int*)addr_f;
  float expected = *addr_f;
  float sum = log_plus(expected, value);
  int old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum));

  while (old_value != __float_as_int(expected)) {
    expected = __int_as_float(old_value);
    sum = log_plus(expected, value);
    old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum));
  }
  return __int_as_float(old_value);
}

// One FST arc: log weight, input label, and destination (alpha) or
// source (beta) state, depending on which table it is stored in.
struct Transition {
  float weight = -float(INFINITY);
  int label = 0;
  int state = 0;
};

// [first, second] inclusive range into a Transition array; the default
// (first=1 > second=0) is an empty range used for states with no arcs.
struct IntPair {
  int first = 1;
  int second = 0;
};

// <<<batch_size, CU_BLOCK_CONST>>>
// Initializes alpha at t=0 with the start weights; one block per batch
// element, threads stride over states.
__global__ void alpha_first_kernel(float *alpha, const int alpha_size,
                                   const int batch_size, const int T,
                                   const float * const start_weight) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  for (int idx = tid; idx < alpha_size; idx += blockDim.x) {
    alpha[mini_batch_idx * alpha_size * (T+1) + idx] = start_weight[idx];
  }
}

// Forward recursion for time step t: for each state, log-sums over its
// incoming arcs (alpha_transition) the previous alpha plus arc weight plus
// the logit for the arc label.
__global__ void alpha_kernel(float *alpha, const float* const logits,
                             const int batch_size, const int T, const int t,
                             const int * const input_lengths,
                             const int alpha_size, const int logits_size,
                             const IntPair * const alpha_transition_index,
                             const Transition * const alpha_transition) {
  int mini_batch_idx = blockIdx.x;
  int tid =
threadIdx.x;
  // Skip time steps beyond this sequence's length.
  if (t > input_lengths[mini_batch_idx]) return;

  int idx1 = mini_batch_idx * alpha_size * (T+1) + alpha_size * t;      // alpha(t)
  int idx2 = mini_batch_idx * alpha_size * (T+1) + alpha_size * (t-1);  // alpha(t-1)
  int idx3 = mini_batch_idx * logits_size * T + logits_size * (t-1);    // logits(t-1)

  for (int idx = tid; idx < alpha_size; idx += blockDim.x) {
    int start = alpha_transition_index[idx].first;
    int end = alpha_transition_index[idx].second;
    float result = -float(INFINITY);
    // Empty arc range (start > end) leaves result at -inf.
    for (int k = start; k <= end; k++) {
      result = log_plus(alpha[idx2+alpha_transition[k].state] +
          alpha_transition[k].weight + logits[idx3+alpha_transition[k].label], result);
    }
    alpha[idx1+idx] = result;
  }
}

// Adds the end weights to alpha at each sequence's final frame cT.
__global__ void alpha_last_kernel(float *alpha, const int alpha_size,
                                  const int batch_size, const int T,
                                  const int * const input_lengths,
                                  const float * const end_weight) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  int alpha_start = mini_batch_idx * alpha_size * (T+1);
  int cT = input_lengths[mini_batch_idx];
  for (int idx = tid; idx < alpha_size; idx += blockDim.x) {
    alpha[alpha_start+cT*alpha_size+idx] += end_weight[idx];
  }
}

// <<< minibatch, N = 32,64,128...>>>
// Shared-memory log-sum-exp reduction of the final alpha frame into one
// loglikelihood per batch element. Requires blockDim.x * sizeof(float)
// dynamic shared memory and a power-of-two block size >= 64.
__global__ void alpha_lld_kernal(const float * const alpha,
                                 const int alpha_size, const int T,
                                 const int * const input_lengths,
                                 float * loglikelihood) {
  int mini_batch_idx = blockIdx.x;
  int idx = threadIdx.x;
  int block_dim = blockDim.x;
  int cT = input_lengths[mini_batch_idx];
  int last_idx = alpha_size * (T+1) * mini_batch_idx + cT*alpha_size;
  // printf("enter alpha_lld_kernal, block.x: %d, thread.x: %d\n", blockIdx.x, threadIdx.x);
  extern __shared__ float sdata[];
  // Per-thread partial log-sum over a strided slice of the states.
  float temp = -float(INFINITY);
  for (int i = idx; i < alpha_size; i += block_dim) {
    temp = log_plus(temp, alpha[last_idx+i]);
  }
  sdata[idx] = temp;
  __syncthreads();

  // Tree reduction down to one warp.
  for (int shift = block_dim / 2; shift > warpSize; shift >>= 1) {
    if (idx < shift) {
      sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]);
    }
    __syncthreads();
  }

  // Final warp-level phase.
  // NOTE(review): this relies on implicit warp-synchronous execution (no
  // __syncwarp / volatile); unsafe under independent thread scheduling on
  // Volta+ — confirm target architecture.
  if (idx < warpSize) {
    for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]);
    }
  }
  __syncthreads();

  if (idx == 0) {
    loglikelihood[mini_batch_idx] = sdata[0];
    // printf("alpha loglikelihod: %f mini_batch %d\n", loglikelihood[mini_batch_idx], mini_batch_idx);
  }
}

// Initializes beta at each sequence's final frame with the end weights.
// beta is double-buffered (two frames per batch element), selected by
// frame parity (cT % 2).
__global__ void beta_last_kernel(float *beta, const int beta_size,
                                 const int batch_size,
                                 const int * const input_lengths,
                                 const float * const end_weight) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  int cT = input_lengths[mini_batch_idx];
  for (int idx = tid; idx < beta_size; idx += blockDim.x) {
    beta[mini_batch_idx * 2 * beta_size + (cT % 2) * beta_size + idx] = end_weight[idx];
  }
}

// Adds the start weights into beta's frame-0 buffer (parity slot 0).
__global__ void beta_first_kernel(float *beta, const int beta_size,
                                  const int batch_size,
                                  const float * const start_weight) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  for (int idx = tid; idx < beta_size; idx += blockDim.x) {
    beta[mini_batch_idx * 2 * beta_size + idx] += start_weight[idx];
  }
}

// Backward recursion for time step t, also accumulating occupancy
// gradients: for each state, log-sums over its outgoing arcs
// (beta_transition) and atomically folds alpha+arc contribution into
// grad_storage (ATOMIC_CONST replicated slots per logit to reduce
// contention).
__global__ void beta_kernel(float *beta, const float* const alpha,
                            const float* const logits, float *grad_storage,
                            const int batch_size, const int T, const int t,
                            const int *input_lengths, const int beta_size,
                            const int logits_size,
                            const IntPair * const beta_transition_index,
                            const Transition * const beta_transition) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  if (t >= input_lengths[mini_batch_idx]) return;

  int idx1 = mini_batch_idx * beta_size * (T+1) + beta_size * t;        // alpha(t)
  int idx2 = mini_batch_idx * beta_size * 2 + beta_size * ((t+1) % 2);  // beta(t+1) buffer
  int idx3 = mini_batch_idx * beta_size * 2 + beta_size * (t % 2);      // beta(t) buffer
  int idx4 = mini_batch_idx * logits_size * T + logits_size * t;        // logits(t)
  int idx5 = mini_batch_idx * logits_size * ATOMIC_CONST;               // grad slots

  for (int idx = tid; idx < beta_size; idx += blockDim.x) {
    int start = beta_transition_index[idx].first;
    int end = beta_transition_index[idx].second;
    float beta_result = -float(INFINITY);
    float temp_value = -float(INFINITY);

    for (int k = start; k <= end; k++) {
      temp_value = beta[idx2+beta_transition[k].state] +
beta_transition[k].weight + logits[idx4+beta_transition[k].label];
      beta_result = log_plus(temp_value, beta_result);
      // Arc posterior contribution (in log space): alpha(t,state) +
      // arc weight + logit + beta(t+1, next state).
      float partial_grad = alpha[idx1+idx] + temp_value;
      // Spread contention over ATOMIC_CONST slots keyed by lane.
      float *grad_position = grad_storage + idx5 + beta_transition[k].label * ATOMIC_CONST + threadIdx.x % ATOMIC_CONST;
      atomic_log_plus(grad_position, partial_grad);
    }
    beta[idx3+idx] = beta_result;
  }
}

// Folds the ATOMIC_CONST replicated grad_storage slots of each logit into
// the output gradient for frame t, normalizing by the sequence
// log-likelihood (expf(grad - lld) = posterior), and resets the slots to
// -inf for the next frame.
__global__ void copy_grad(float *grad_storage, float *grad_net,
                          const float * const alpha_lld,
                          const int * const input_lengths,
                          const int batch_size, const int logits_size,
                          const int T, const int t) {
  int mini_batch_idx = blockIdx.x;
  int tid = threadIdx.x;
  if (t >= input_lengths[mini_batch_idx]) return;

  float lld = alpha_lld[mini_batch_idx];
  for (int idx = tid; idx < logits_size; idx += blockDim.x) {
    float *grad_position = grad_net + mini_batch_idx*logits_size*T + t*logits_size + idx;
    int idx_storage = mini_batch_idx*logits_size*ATOMIC_CONST+idx*ATOMIC_CONST;

    float grad = -float(INFINITY);
    for (int i = 0; i < ATOMIC_CONST; i++) {
      grad = log_plus(grad_storage[idx_storage+i], grad);
      grad_storage[idx_storage+i] = -float(INFINITY);
    }
    *grad_position = expf(grad - lld);
  }
}

// Reads the backward log-likelihood (beta of state 0 in buffer 0) for each
// batch element; launched with one thread per batch element.
__global__ void beta_lld_kernal(const float * const beta,
                                const int beta_size, float * loglikelihood) {
  int idx = threadIdx.x;
  int first_idx = beta_size * 2 * idx;
  loglikelihood[idx] = beta[first_idx];
}

// Per-GPU device copies of the FST (indexed by the slot assigned in
// DEVICE_HASH), set up by Init() and released by Release().
Transition ** TRANSITION_ALPHA = NULL;
Transition ** TRANSITION_BETA = NULL;
IntPair ** TRANSITION_INDEX_ALPHA = NULL;
IntPair ** TRANSITION_INDEX_BETA = NULL;
float ** START_WEIGHT = NULL;
float ** END_WEIGHT = NULL;
int DEN_NUM_ARCS = 0;
int DEN_NUM_STATES = 0;
// Maps a CUDA device ordinal to its slot in the arrays above.
int *DEVICE_HASH = NULL;

// Parses the FST file into per-state adjacency (forward and backward),
// labels, weights and start/end weights. Defined elsewhere.
void ReadFst(const char * fst_name, std::vector<std::vector<int> > &alpha_next,
             std::vector<std::vector<int> > &beta_next,
             std::vector<std::vector<int> > &alpha_ilabel,
             std::vector<std::vector<int> > &beta_ilabel,
             std::vector<std::vector<float> > &alpha_weight,
             std::vector<std::vector<float> > &beta_weight,
             std::vector<float> &start_weight, std::vector<float>
&end_weight, int &num_states, int &num_arcs);

extern "C" {

// Loads the FST from fst_name, flattens its arcs into contiguous
// Transition tables (forward and backward) with per-state index ranges,
// and uploads a copy to every GPU in `gpus`. Also builds DEVICE_HASH so
// kernels can find their GPU's copy. Restores the previously current
// device before returning.
void Init(const char * fst_name, int n_gpus, int * gpus) {
  std::vector<std::vector<int> > alpha_next;
  std::vector<std::vector<int> > beta_next;
  std::vector<std::vector<int> > alpha_ilabel;
  std::vector<std::vector<int> > beta_ilabel;
  std::vector<std::vector<float> > alpha_weight;
  std::vector<std::vector<float> > beta_weight;
  std::vector<float> start_weight;
  std::vector<float> end_weight;
  // const char * fst_name = "test_lm.fst";
  int num_states = 0;
  int num_arcs = 0;
  ReadFst(fst_name, alpha_next, beta_next, alpha_ilabel, beta_ilabel,
          alpha_weight, beta_weight, start_weight, end_weight, num_states, num_arcs);
  DEN_NUM_ARCS = num_arcs;
  DEN_NUM_STATES = num_states;
  // std::cout << "DEN_NUM_ARCS: " << DEN_NUM_ARCS << std::endl;
  // std::cout << "DEN_NUM_STATES: " << DEN_NUM_STATES << std::endl;

  std::vector<Transition> transition_alpha(num_arcs);
  std::vector<Transition> transition_beta(num_arcs);
  std::vector<IntPair> transition_index_alpha(num_states);
  std::vector<IntPair> transition_index_beta(num_states);

  // Flatten forward arcs; states with no arcs keep the empty-range
  // sentinel (first=1 > second=0), which the kernels' k-loops skip.
  int count = 0;
  for (int i = 0; i < num_states; i++) {
    if (alpha_next[i].empty()) {
      transition_index_alpha[i].first = 1;
      transition_index_alpha[i].second = 0;
    } else {
      transition_index_alpha[i].first = count;
      for (int j = 0; j < alpha_next[i].size(); j++) {
        transition_alpha[count].state = alpha_next[i][j];
        transition_alpha[count].label = alpha_ilabel[i][j];
        transition_alpha[count].weight = alpha_weight[i][j];
        count++;
      }
      transition_index_alpha[i].second = count-1;
    }
  }
  if (count != num_arcs) {
    std::cerr << "count does not equal to num_arcs" << std::endl;
    exit(-1);
  }

  // Same flattening for backward arcs.
  count = 0;
  for (int i = 0; i < num_states; i++) {
    if (beta_next[i].empty()) {
      transition_index_beta[i].first = 1;
      transition_index_beta[i].second = 0;
    } else {
      transition_index_beta[i].first = count;
      for (int j = 0; j < beta_next[i].size(); j++) {
        transition_beta[count].state = beta_next[i][j];
        transition_beta[count].label = beta_ilabel[i][j];
transition_beta[count].weight = beta_weight[i][j]; count++; } transition_index_beta[i].second = count-1; } } if (count != num_arcs) { std::cerr << "count does not equal to num_arcs" << std::endl; exit(-1); } int max_gpu = 0; for (int i = 0; i < n_gpus; i++) { if (gpus[i] > max_gpu) max_gpu = gpus[i]; } DEVICE_HASH = new int[max_gpu+1]; memset(DEVICE_HASH, 0, sizeof(int)*(max_gpu+1)); for (int i = 0; i < n_gpus; i++) DEVICE_HASH[gpus[i]] = i; TRANSITION_ALPHA = new Transition*[n_gpus]; TRANSITION_BETA= new Transition*[n_gpus]; TRANSITION_INDEX_ALPHA = new IntPair*[n_gpus]; TRANSITION_INDEX_BETA= new IntPair*[n_gpus]; START_WEIGHT = new float*[n_gpus]; END_WEIGHT = new float*[n_gpus]; int prev_device = 0; CHECK_CUDA(cudaGetDevice(&prev_device)); for (int i = 0; i < n_gpus; i++) { std::cout << "gpu i: " << gpus[i] << std::endl; CHECK_CUDA(cudaSetDevice(gpus[i])); CHECK_CUDA(cudaMalloc((void**)&TRANSITION_ALPHA[i], sizeof(Transition)*num_arcs)); CHECK_CUDA(cudaMalloc((void**)&TRANSITION_BETA[i], sizeof(Transition)*num_arcs)); CHECK_CUDA(cudaMalloc((void**)&TRANSITION_INDEX_ALPHA[i], sizeof(IntPair)*num_states)); CHECK_CUDA(cudaMalloc((void**)&TRANSITION_INDEX_BETA[i], sizeof(IntPair)*num_states)); CHECK_CUDA(cudaMalloc((void**)&START_WEIGHT[i], sizeof(float)*num_states)); CHECK_CUDA(cudaMalloc((void**)&END_WEIGHT[i], sizeof(float)*num_states)); CHECK_CUDA(cudaMemcpy(TRANSITION_ALPHA[i], transition_alpha.data(), sizeof(Transition)*num_arcs, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpy(TRANSITION_BETA[i], transition_beta.data(), sizeof(Transition)*num_arcs, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpy(TRANSITION_INDEX_ALPHA[i], transition_index_alpha.data(), sizeof(IntPair)*num_states, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpy(TRANSITION_INDEX_BETA[i], transition_index_beta.data(), sizeof(IntPair)*num_states, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpy(START_WEIGHT[i], start_weight.data(), sizeof(float)*num_states, cudaMemcpyHostToDevice)); 
CHECK_CUDA(cudaMemcpy(END_WEIGHT[i], end_weight.data(), sizeof(float)*num_states, cudaMemcpyHostToDevice)); } CHECK_CUDA(cudaSetDevice(prev_device)); } void Release(int n_gpus, int *gpus) { int prev_device = 0; CHECK_CUDA(cudaGetDevice(&prev_device)); for (int i = 0; i < n_gpus; i++) { CHECK_CUDA(cudaSetDevice(gpus[i])); CHECK_CUDA(cudaFree(TRANSITION_ALPHA[i])); CHECK_CUDA(cudaFree(TRANSITION_BETA[i])); CHECK_CUDA(cudaFree(TRANSITION_INDEX_ALPHA[i])); CHECK_CUDA(cudaFree(TRANSITION_INDEX_BETA[i])); CHECK_CUDA(cudaFree(START_WEIGHT[i])); CHECK_CUDA(cudaFree(END_WEIGHT[i])); } CHECK_CUDA(cudaSetDevice(prev_device)); delete[] TRANSITION_ALPHA; delete[] TRANSITION_BETA; delete[] TRANSITION_INDEX_ALPHA; delete[] TRANSITION_INDEX_BETA; delete[] START_WEIGHT; delete[] END_WEIGHT; TRANSITION_ALPHA = NULL; TRANSITION_BETA = NULL; TRANSITION_INDEX_ALPHA = NULL; TRANSITION_INDEX_BETA = NULL; START_WEIGHT = NULL; END_WEIGHT = NULL; delete[] DEVICE_HASH; DEVICE_HASH = NULL; } void compute_alpha(float *alpha, float *logits, const int batch_size, int T, const int alpha_size, int logits_size, int *input_lengths, float * loglikelihood, cudaStream_t stream) { int device = 0; CHECK_CUDA(cudaGetDevice(&device)); int gid = DEVICE_HASH[device]; int alpha_lld_dim = 128; alpha_first_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, alpha_size, batch_size, T, START_WEIGHT[gid]); for (int t = 1; t <= T; t++) { alpha_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, logits, batch_size, T, t, input_lengths, alpha_size, logits_size, TRANSITION_INDEX_ALPHA[gid], TRANSITION_ALPHA[gid]); } alpha_last_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, alpha_size, batch_size, T, input_lengths, END_WEIGHT[gid]); alpha_lld_kernal<<<batch_size, alpha_lld_dim, sizeof(float)*alpha_lld_dim, stream>>>(alpha, alpha_size, T, input_lengths, loglikelihood); // cudaDeviceSynchronize(); } void compute_beta_and_grad(float *beta, const float * const alpha, const float * const logits, const float * 
const alpha_lld, float *grad_storage, float *grad_net, const int batch_size, const int T, const int beta_size, const int logits_size, const int * const input_lengths, float * loglikelihood, cudaStream_t stream) { int device = 0; CHECK_CUDA(cudaGetDevice(&device)); int gid= DEVICE_HASH[device]; // set grad_storage copy_grad<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, 0); beta_last_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, beta_size, batch_size, input_lengths, END_WEIGHT[gid]); for (int t = T-1; t >= 0; t--) { beta_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, alpha, logits, grad_storage, batch_size, T, t, input_lengths, beta_size, logits_size, TRANSITION_INDEX_BETA[gid], TRANSITION_BETA[gid]); copy_grad<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, t); } beta_first_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, beta_size, batch_size, START_WEIGHT[gid]); beta_lld_kernal<<<1, batch_size, 0, stream>>>(beta, beta_size, loglikelihood); } }
the_stack
#include <SDL.h>
#include <cstdio>
#include <cfloat>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
#include "Types.h"
#include "Base3d.h"
#include "Camera.h"
#include "cudarenderer.h"

/////////////////////////////////
// Raytracing configuration
#define THREADS_PER_BLOCK 64
// What depth to stop reflections and refractions?
#define MAX_RAY_DEPTH 2
// Ray intersections of a distance <=NUDGE_FACTOR (from the origin) don't count
#define NUDGE_FACTOR 1e-5f
// How much the reflected color contributes to the overall
#define REFLECTIONS_RATE 0.375f

//////////////////////////////
// Enable ambient occlusion?
//#define AMBIENT_OCCLUSION
// How many ambient rays to spawn per ray intersection?
#define AMBIENT_SAMPLES 32
// How close to check for ambient occlusion?
#define AMBIENT_RANGE 0.15f

// Vertex/triangle counts, uploaded to constant memory by the host.
__constant__ unsigned VERTICES;
__constant__ unsigned TRIANGLES;

// Textures for vertices, triangles and BVH data
// (see CudaRender() below, as well as main() to see the data setup process)
texture<uint1, 1, cudaReadModeElementType> g_triIdxListTexture;
texture<float2, 1, cudaReadModeElementType> g_pCFBVHlimitsTexture;
texture<uint4, 1, cudaReadModeElementType> g_pCFBVHindexesOrTrilistsTexture;
texture<float4, 1, cudaReadModeElementType> g_verticesTexture;
texture<float4, 1, cudaReadModeElementType> g_trianglesTexture;

// Utility functions

// CUDA dot product — three overloads so float4 texels can be mixed with Vector3.
__device__ coord dotCUDA(const Vector3& l, const Vector3& r)
{
    return l._x*r._x +l._y*r._y +l._z*r._z;
}
__device__ coord dotCUDA(const float4& l, const Vector3& r)
{
    return l.x*r._x +l.y*r._y +l.z*r._z;
}
__device__ coord dotCUDA(const Vector3& l, const float4& r)
{
    return l._x*r.x +l._y*r.y +l._z*r.z;
}

// CUDA cross product of two Vector3.
__device__ Vector3 crossCUDA(const Vector3& l, const Vector3& r)
{
    coord x,y,z;
    const coord &aax=l._x;
    const coord &aay=l._y;
    const coord &aaz=l._z;
    const coord &bbx=r._x;
    const coord &bby=r._y;
    const coord &bbz=r._z;
    x=aay*bbz-bby*aaz;
    y=bbx*aaz-aax*bbz;
    z=aax*bby-aay*bbx;
    return Vector3(x,y,z);
}

// CUDA distance of two points
__device__ coord distanceCUDA(const Vector3& a, const Vector3& b)
{
    coord dx=a._x - b._x;
    coord dy=a._y - b._y;
    coord dz=a._z - b._z;
    return sqrt(dx*dx + dy*dy + dz*dz);
}

// Sometime you just want to compare, so no sqrt is needed
__device__ coord distancesqCUDA(const Vector3& a, const Vector3& b)
{
    coord dx=a._x - b._x;
    coord dy=a._y - b._y;
    coord dz=a._z - b._z;
    return dx*dx + dy*dy + dz*dz;
}

// Matrix3x3 multipled by Vector3
__device__ Vector3 multiplyRightWith(const Matrix3& mv, const Vector3& r)
{
    coord xnew = mv._row1._x*r._x + mv._row1._y*r._y + mv._row1._z*r._z;
    coord ynew = mv._row2._x*r._x + mv._row2._y*r._y + mv._row2._z*r._z;
    coord znew = mv._row3._x*r._x + mv._row3._y*r._y + mv._row3._z*r._z;
    return Vector3(xnew, ynew, znew);
}

// Transform Vector3 to any space, given Matrix3 and origin
__device__ Vector3 inline TransformToSomeSpace(Vector3 point, Matrix3 *mv, Vector3 *origin)
{
    point -= *origin;
    return multiplyRightWith(*mv, point);
}

// After transformation in camera space, project and plot (used for point rendering)
#define CLIPPLANEDISTANCE 0.2f

// Perspective-project a camera-space point and write defaultColor into the
// pixel buffer if it lands on screen. Points nearer than CLIPPLANEDISTANCE
// are discarded.
__device__ void inline ProjectAndPlot(const Vector3& xformed, int *pixels, int defaultColor=0x00FFFFFF )
{
    if (xformed._z>CLIPPLANEDISTANCE) {
        int x = (int)(MAXX/2.f + FOV * xformed._y/xformed._z);
        int y = (int)(MAXY/2.f - FOV * xformed._x/xformed._z);
        if (y>=0.f && y<(int)MAXY && x>=0.f && x<(int)MAXX)
            pixels[y*MAXX + x] = defaultColor;
    }
}

////////////////////////////////////////
// Rendering kernel for MODE_POINTS
////////////////////////////////////////
// One thread per vertex: transform to camera space, project, plot white.
__global__ void CoreLoopVertices(int *pixels, Matrix3 *cudaWorldToCameraSpace, Vector3 *eye)
{
    unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= VERTICES)
        return;
    // Simple projection and ploting of a white point per vertex
    // Plot projected coordinates (on screen)
    // Vertices are stored two float4 texels apart (position, then normal).
    Vector3 v(tex1Dfetch(g_verticesTexture, 2*idx));
    ProjectAndPlot(
        TransformToSomeSpace(v, cudaWorldToCameraSpace, eye),
        pixels);
}

//////////////////////////////////////////////
// Rendering kernel for MODE_POINTSHIDDEN
//////////////////////////////////////////////

// Create OpenGL BGR value for assignment in PBO buffer
__device__ int getColor(Pixel& p)
{
    return (((unsigned)p._b) << 16) | (((unsigned)p._g) << 8) | (((unsigned)p._r));
}

// One thread per triangle: back-face cull (closed objects), then project
// and plot the triangle's 3 vertices in its color.
__global__ void CoreLoopTriangles(int *pixels, Matrix3 *cudaWorldToCameraSpace, Triangle *pTriangles, Vector3 *eye)
{
    unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= TRIANGLES)
        return;

    // First check if the triangle is visible from where we stand
    // (closed objects only)
    // Triangles occupy 5 float4 texels each: center (w = twoSided flag),
    // normal (w = plane d), then the 3 precomputed edge equations.
    float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
    float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);

    Vector3 triToEye = *eye;
    triToEye -= center;
    // Normally we would normalize, but since we just need the sign
    // of the dot product (to determine if it facing us or not)...
    //triToEye.normalize();
    //if (!pTriangles[idx]._twoSided && dotCUDA(triToEye, pTriangles[idx]._normal)<0.f)
    if (center.w == 0.f && dotCUDA(triToEye, normal)<0.f)
        return;

    int color = getColor(pTriangles[idx]._colorf);

    // For each of the 3 vertices of triangle j of object i,
    // transform to camera space, project and plot them
    Vector3 v1(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx1));
    Vector3 v2(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx2));
    Vector3 v3(tex1Dfetch(g_verticesTexture, 2*pTriangles[idx]._idx3));
    ProjectAndPlot( TransformToSomeSpace(v1, cudaWorldToCameraSpace, eye), pixels, color);
    ProjectAndPlot( TransformToSomeSpace(v2, cudaWorldToCameraSpace, eye), pixels, color);
    ProjectAndPlot( TransformToSomeSpace(v3, cudaWorldToCameraSpace, eye), pixels, color);
}

///////////////////////////////////////////////
// Raytracing modes
///////////////////////////////////////////////

// Helper function, that checks whether a ray intersects a bbox
// (slab test against the BVH node's per-axis limits texture).
__device__ bool RayIntersectsBox(const Vector3& originInWorldSpace, const Vector3& rayInWorldSpace, int boxIdx)
{
    // set Tnear = - infinity, Tfar = infinity
    //
    // For each pair of planes P associated with X, Y, and Z do:
    // (example using X planes)
    // if direction Xd = 0 then the ray is parallel to the X planes, so
    // if origin Xo is not between the slabs ( Xo < Xl or Xo > Xh) then
    // return false
    // else, if the ray is not parallel to the plane then
    // begin
    // compute the intersection distance of the planes
    // T1 = (Xl - Xo) / Xd
    // T2 = (Xh - Xo) / Xd
    // If T1 > T2 swap (T1, T2) /* since T1 intersection with near plane */
    // If T1 > Tnear set Tnear =T1 /* want largest Tnear */
    // If T2 < Tfar set Tfar="T2" /* want smallest Tfar */
    // If Tnear > Tfar box is missed so
    // return false
    // If Tfar < 0 box is behind ray
    // return false
    // end
    // end of for loop
    //
    // If Box survived all above tests, return true with intersection point Tnear and exit point Tfar.
    coord Tnear, Tfar;
    Tnear = -FLT_MAX;
    Tfar = FLT_MAX;

    float2 limits;

// Token-pasting macro: performs the slab test for one axis (c = x, y or z),
// updating Tnear/Tfar or returning false on a miss.
#define CHECK_NEAR_AND_FAR_INTERSECTION(c)                                          \
    if (rayInWorldSpace._ ## c == 0.f) {                                            \
        if (originInWorldSpace._##c < limits.x) return false;                       \
        if (originInWorldSpace._##c > limits.y) return false;                       \
    } else {                                                                        \
        coord T1 = (limits.x - originInWorldSpace._##c)/rayInWorldSpace._##c;       \
        coord T2 = (limits.y - originInWorldSpace._##c)/rayInWorldSpace._##c;       \
        if (T1>T2) { coord tmp=T1; T1=T2; T2=tmp; }                                 \
        if (T1 > Tnear) Tnear = T1;                                                 \
        if (T2 < Tfar)  Tfar = T2;                                                  \
        if (Tnear > Tfar)                                                           \
            return false;                                                           \
        if (Tfar < 0.f)                                                             \
            return false;                                                           \
    }

    limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx); // box.bottom._x/top._x placed in limits.x/limits.y
    CHECK_NEAR_AND_FAR_INTERSECTION(x)
    limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+1); // box.bottom._y/top._y placed in limits.x/limits.y
    CHECK_NEAR_AND_FAR_INTERSECTION(y)
    limits = tex1Dfetch(g_pCFBVHlimitsTexture, 3*boxIdx+2); // box.bottom._z/top._z placed in limits.x/limits.y
    CHECK_NEAR_AND_FAR_INTERSECTION(z)

    return true;
}

// Templated ray/triangle intersection function - offers two compile-time options:
// The first one is used to discriminate between shadow rays (that stop at the first hit)
// and normal rays, that have to find the closest hit.
//
// The second one enables or disables culling of backfacing triangles, and is...
// (a) enabled for the refraction call (which needs both front and back-faces), but
// (b) disabled for reflections and shadow rays.
//
// C++ compile-time power... all lesser languages bow down... :-)
//
template <bool stopAtfirstRayHit, bool doCulling>
__device__ bool BVH_IntersectTriangles(
    // Inputs
    //Triangle *pTriangles,
    const Vector3& origin, const Vector3& ray, unsigned avoidSelf,
    // outputs
    int& pBestTriIdx,
    //
    // both inputs and outputs!
    //
    // for normal rays:
    //  pointHitInWorldSpace (output)
    //  kXX (outputs) perpendicular distances of intersection point from the 3 triangle edges
    //  (used for PhongNormal calculations)
    //
    // for shadow rays:
    //  pointHitInWorldSpace (input) provides the light position
    Vector3& pointHitInWorldSpace,
    coord& kAB, coord& kBC, coord& kCA)
{
    // in the loop below, maintain the closest triangle and the point where we hit it:
    pBestTriIdx = -1;
    coord bestTriDist;

    // light position passed-in pointHitInWorldSpace (only in shadow mode - i.e. stopAtfirstRayHit=true)
    Vector3& lightPos = pointHitInWorldSpace;

    // Compile-time work (stopAtfirstRayHit is template param)
    if (stopAtfirstRayHit)
        // In shadow ray mode, start from light distance
        bestTriDist = distancesqCUDA(origin, lightPos);
    else
        // In normal mode, start from infinity
        bestTriDist = FLT_MAX;

    // Iterative (stack-based) BVH traversal, rooted at node 0.
    int stack[BVH_STACK_SIZE];
    int stackIdx = 0;
    stack[stackIdx++] = 0;
    while(stackIdx) {
        int boxIdx = stack[stackIdx-1];
        //CacheFriendlyBVHNode *pCurrent = &cudaBVHNodes[boxIdx];
        stackIdx--;
        // Node is packed in a uint4 texel: x = leaf flag (top bit) + tri count,
        // y/z = child indices (inner), w = start index into the tri-index list (leaf).
        uint4 data = tex1Dfetch(g_pCFBVHindexesOrTrilistsTexture, boxIdx);

        // original, "pure" BVH form...
        //if (!pCurrent->IsLeaf()) {

        // cache-friendly BVH form...
        //if (!(pCurrent->u.leaf._count & 0x80000000)) {

        // textured BVH form...
        if (!(data.x & 0x80000000)) { // inner node: push both children
            if (RayIntersectsBox(origin, ray, boxIdx)) {
                //stack[stackIdx++] = pCurrent->u.inner._idxRight;
                stack[stackIdx++] = data.y;
                //stack[stackIdx++] = pCurrent->u.inner._idxLeft;
                stack[stackIdx++] = data.z;
                if(stackIdx>BVH_STACK_SIZE) {
                    return false; // XXX
                }
            }
        } else { // leaf node: test every triangle it references

            // original, "pure" BVH form...
            //BVHLeaf *p = dynamic_cast<BVHLeaf*>(pCurrent);
            //for(std::list<const Triangle*>::iterator it=p->_triangles.begin();
            //    it != p->_triangles.end();
            //    it++)

            // cache-friendly BVH form...
            //for(unsigned i=pCurrent->u.leaf._startIndexInTriIndexList;
            //    i<pCurrent->u.leaf._startIndexInTriIndexList + (pCurrent->u.leaf._count & 0x7fffffff);

            // textured BVH form...
            for(unsigned i=data.w; i<data.w + (data.x & 0x7fffffff); i++) {
                // original, "pure" BVH form...
                //const Triangle& triangle = *(*it);

                // cache-friendly BVH form...
                //const Triangle& triangle = pTriangles[cudaTriIdxList[i]];

                // textured BVH form...
                int idx = tex1Dfetch(g_triIdxListTexture, i).x;

                if (avoidSelf == idx)
                    continue; // avoid self-reflections/refractions

                float4 center = tex1Dfetch(g_trianglesTexture, 5*idx);
                float4 normal = tex1Dfetch(g_trianglesTexture, 5*idx+1);

                // doCulling is a compile-time param, this code will be "codegenerated"
                // at compile time only for reflection-related calls to Raytrace (see below)
                //if (doCulling && !triangle._twoSided) {
                if (doCulling && (center.w == 0.f)) { // template-param, compile-time check
                    // Check visibility of triangle via dot product
                    Vector3 fromTriToOrigin = origin;
                    //fromTriToOrigin -= triangle._center;
                    fromTriToOrigin -= center;
                    // Normally we would normalize, but since we just need the sign
                    // of the dot product (to determine if it facing us or not)...
                    //fromTriToOrigin.normalize();
                    if (dotCUDA(fromTriToOrigin, normal)<0)
                        continue;
                }

                // Use the pre-computed triangle intersection data: normal, d, e1/d1, e2/d2, e3/d3
                coord k = dotCUDA(normal, ray);
                if (k == 0.0f)
                    continue; // this triangle is parallel to the ray, ignore it.

                coord s = (normal.w - dotCUDA(normal, origin))/k;
                if (s <= 0.0f) // this triangle is "behind" the origin.
                    continue;
                if (s <= NUDGE_FACTOR)
                    continue;
                Vector3 hit = ray*s;
                hit += origin;

                // Is the intersection of the ray with the triangle's plane INSIDE the triangle?
                // (test against the 3 precomputed edge-plane equations; kt* must all be >= 0)
                float4 ee1 = tex1Dfetch(g_trianglesTexture, 5*idx+2);
                coord kt1 = dotCUDA(ee1, hit) - ee1.w;
                if (kt1<0.0f) continue;
                float4 ee2 = tex1Dfetch(g_trianglesTexture, 5*idx+3);
                coord kt2 = dotCUDA(ee2, hit) - ee2.w;
                if (kt2<0.0f) continue;
                float4 ee3 = tex1Dfetch(g_trianglesTexture, 5*idx+4);
                coord kt3 = dotCUDA(ee3, hit) - ee3.w;
                if (kt3<0.0f) continue;

                // It is, "hit" is the world space coordinate of the intersection.

                // Was this a normal ray or a shadow ray? (template param)
                if (stopAtfirstRayHit) {
                    // Shadow ray, check whether the triangle obstructs the light
                    coord dist = distancesqCUDA(lightPos, hit);
                    if (dist < bestTriDist) // distance to light (squared) passed in kAB
                        return true; // we found a triangle obstructing the light, return true
                } else {
                    // Normal ray - it this intersection closer than all the others?
                    coord hitZ = distancesqCUDA(origin, hit);
                    if (hitZ < bestTriDist) {
                        // maintain the closest hit
                        bestTriDist = hitZ;
                        pBestTriIdx = idx;
                        pointHitInWorldSpace = hit;
                        kAB = kt1;
                        kBC = kt2;
                        kCA = kt3;
                    }
                }
            }
        }
    }

    // Normal ray or shadow ray? (compile-time template param)
    if (!stopAtfirstRayHit)
        // for normal ray, return true if we pierced a triangle
        return pBestTriIdx != -1;
    else
        // for shadow ray, return true if we found a triangle obstructing the light.
        return false;
}

// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
// Core recursive raytracer: returns the color contribution of one ray.
// depth counts reflection bounces (recursion is unrolled at compile time and
// stopped by the MAX_RAY_DEPTH specializations below); the remaining template
// params toggle specular highlights, Phong normal interpolation, reflections,
// shadows, and back-face culling.
template <int depth, bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool doCulling>
__device__ Pixel Raytrace(
    Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf,
    Triangle *pTriangles,
    Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace)
{
    int pBestTriIdx = -1;
    const Triangle *pBestTri = NULL;
    Vector3 pointHitInWorldSpace;
    coord kAB=0.f, kBC=0.f, kCA=0.f; // distances from the 3 edges of the triangle (from where we hit it)

    // Use the surface-area heuristic based, bounding volume hierarchy of axis-aligned bounding boxes
    // (keywords: SAH, BVH, AABB)
    if (!BVH_IntersectTriangles<false,doCulling>(
            //pTriangles,
            originInWorldSpace, rayInWorldSpace, avoidSelf,
            pBestTriIdx, pointHitInWorldSpace, kAB, kBC, kCA))
        // We pierced no triangle, return with no contribution (ambient is black)
        return Pixel(0.f,0.f,0.f);

    // Set this to pass to recursive calls below, so that we don't get self-shadow or self-reflection
    // from this triangle...
    avoidSelf = pBestTriIdx;

    pBestTri = &pTriangles[pBestTriIdx];

    // We'll also calculate the color contributed from this intersection
    // Start from the triangle's color
    Pixel color = pBestTri->_colorf;

    // Phong interpolation of normal vector: these values are only set if
    // the doPhongInterp template param is set
    Vector3 phongNormal;
    coord ABx,BCx,CAx,area;

    // Vertex texels: position at 2*idx, normal at 2*idx+1; the .w of the
    // position texel carries the per-vertex ambient occlusion coefficient.
    float4 V1;
    float4 N1;
    float4 V2;
    float4 N2;
    float4 V3;
    float4 N3;
    V1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1);
    V2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2);
    V3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3);
    if (doPhongInterp) { // template-param, compile-time check
        // These are the closest triangle's vertices...
        N1 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx1+1);
        N2 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx2+1);
        N3 = tex1Dfetch(g_verticesTexture, 2*pBestTri->_idx3+1);
        const Vector3 bestTriA = Vector3(V1.x,V1.y,V1.z);
        const Vector3 bestTriB = Vector3(V2.x,V2.y,V2.z);
        const Vector3 bestTriC = Vector3(V3.x,V3.y,V3.z);
        // ...and their normal vectors:
        const Vector3 bestTriNrmA = Vector3(N1.x,N1.y,N1.z);
        const Vector3 bestTriNrmB = Vector3(N2.x,N2.y,N2.z);
        const Vector3 bestTriNrmC = Vector3(N3.x,N3.y,N3.z);

        // We now want to interpolate the triangle's normal,
        // so that as the "pointHitInWorldSpace" gets closer to
        // a vertex X, the interpolated normal becomes closer to bestTriNrmX,
        // and becomes EXACTLY bestTriNrmX, if the pointHitInWorldSpace is X.
        //
        // To do that, we use the 3 areas of the triangle, as it is divided
        // by the pointHitInWorldSpace.
        //
        // This is the total triangle's area: cross product of two edges
        // (in fact, we should divide by 2, but since we're only interested
        // in ratios (see below), there is no need)
        Vector3 AB = bestTriB; AB-= bestTriA;  // edge AB
        Vector3 BC = bestTriC; BC-= bestTriB;  // edge BC
        Vector3 crossAB_BC = crossCUDA(AB, BC);
        area = crossAB_BC.length();      // 2*area(ABC)

        // And these are the three sub-triangles - kAB,kBC,kCA were found above...
        ABx = kAB*distanceCUDA(bestTriA, bestTriB);
        BCx = kBC*distanceCUDA(bestTriB, bestTriC);
        CAx = kCA*distanceCUDA(bestTriC, bestTriA);

        // use the area of the sub-triangle ACROSS a point, to scale!
        // (which means that if pointHitInCameraSpace is exactly on a vertex,
        // the area of the sub-triangle becomes the area of the triangle!)
        Vector3 phongNormalA = bestTriNrmA; phongNormalA *= BCx / area;
        Vector3 phongNormalB = bestTriNrmB; phongNormalB *= CAx / area;
        Vector3 phongNormalC = bestTriNrmC; phongNormalC *= ABx / area;

        // and finally, accumulate the three contributions and normalize.
        phongNormal = phongNormalA + phongNormalB + phongNormalC;
        phongNormal.normalize();
    } else
        phongNormal = pBestTri->_normal;

#ifdef AMBIENT_OCCLUSION
    // Calculate ambient occlusion - throw AMBIENT_SAMPLES number of random rays
    // in the hemisphere formed from the pointHitInWorldSpace and the normal vector...
    // NOTE(review): rand() is a host function — this branch presumably predates
    // device compilation of this path; verify it still compiles when enabled.
    int i=0;
    coord totalLight = 0.f, maxLight = 0.f;
    while (i<AMBIENT_SAMPLES) {
        Vector3 ambientRay = phongNormal;
        ambientRay._x += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
        ambientRay._y += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
        ambientRay._z += float(rand()-RAND_MAX/2)/(RAND_MAX/2);
        float cosangle = dotCUDA(ambientRay, phongNormal);
        if (cosangle<0.f) continue;
        i++;
        maxLight += cosangle;
        ambientRay.normalize();
        Vector3 temp(pointHitInWorldSpace);
        temp += ambientRay*AMBIENT_RANGE;
        const Triangle *dummy;
        // Some objects needs a "nudge", to avoid self-shadowing
        //Vector3 nudgedPointHitInWorldSpace = pointHitInWorldSpace;
        //nudgedPointHitInWorldSpace += ambientRay*.005f;
        //if (!BVH_IntersectTriangles<true,true>(
        //        nudgedPointHitInWorldSpace, ambientRay, avoidSelf,
        if (!BVH_IntersectTriangles<true,true>(
                pTriangles,
                pointHitInWorldSpace, ambientRay, avoidSelf,
                dummy, temp, kAB, kAB, kAB)) {
            // Accumulate contribution of this random ray
            totalLight += cosangle;
        }
    }
    // total ambient light, averaged over all random rays
    color *= (AMBIENT/255.0f)*(totalLight/maxLight);
#else
    // Dont calculate ambient occlusion, use the pre-calculated value from the model
    // (assuming it exists!)
    coord ambientOcclusionCoeff;
    if (doPhongInterp) { // template-param, compile-time check
        // we have a phong normal, so use the subtriangle areas
        // to interpolate the 3 ambientOcclusionCoeff values
        ambientOcclusionCoeff = V1.w*BCx/area + V2.w*CAx/area + V3.w*ABx/area;
            /*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff*BCx/area +
            pVertices[pBestTri->_idx2]._ambientOcclusionCoeff*CAx/area +
            pVertices[pBestTri->_idx3]._ambientOcclusionCoeff*ABx/area;*/
    } else {
        // we dont have a phong normal, just average the 3 values of the vertices
        ambientOcclusionCoeff = (V1.w + V2.w + V3.w)/3.f;
            /*pVertices[pBestTri->_idx1]._ambientOcclusionCoeff +
            pVertices[pBestTri->_idx2]._ambientOcclusionCoeff +
            pVertices[pBestTri->_idx3]._ambientOcclusionCoeff)/3.f;*/
    }
    coord ambientFactor = (coord) ((AMBIENT*ambientOcclusionCoeff/255.0f)/255.0f);
    color *= ambientFactor;
#endif // AMBIENT_OCCLUSION

    /*
    // Now, for all the lights...
    for(unsigned i=0; i<scene._lights.size(); i++) {
        Light& light = *scene._lights[i];
    */
    // Single point light.
    Vector3& light = *cudaLightPosInWorldSpace;

    // This light's diffuse and specular contribution
    Pixel dColor = Pixel(); // start with black

    // We calculate the vector from point hit, to light (both in world space).
    Vector3 pointToLight = light;
    pointToLight -= pointHitInWorldSpace;

    bool inShadow = false;
    if (doShadows) { // template-param, compile-time check
        // this is our distance from the light (squared, i.e. we didnt use an sqrt)
        coord distanceFromLightSq = pointToLight.lengthsq();

        Vector3 shadowrayInWorldSpace = pointToLight;
        shadowrayInWorldSpace /= sqrt(distanceFromLightSq);

        int pDummy; // just to fill-in the param, not used for shadowrays
        if (BVH_IntersectTriangles<true,doCulling>(
                //pTriangles,
                pointHitInWorldSpace, shadowrayInWorldSpace, avoidSelf,
                pDummy, // dummy
                light,
                kAB, kAB, kAB)) // dummies
        {
            //continue; // we were in shadow, go to next light
            inShadow = true;
        }
    }

    if (!inShadow) {
        // Diffuse color
        pointToLight.normalize();  // vector from point to light (in world space)

        coord intensity = dotCUDA(phongNormal, pointToLight);
        if (intensity<0.f) {
            ; // in shadow, let it be in ambient
        } else {
            Pixel diffuse = pBestTri->_colorf;
            diffuse *= (coord) (DIFFUSE*intensity/255.f);   // diffuse set to a maximum of 130/255
            dColor += diffuse;

            if (doSpecular) { // template-param, compile-time check
                // Specular color
                // We will use the half vector: pointToLight + point to camera
                Vector3 pointToCamera = *cudaEyePosInWorldSpace;
                pointToCamera -= pointHitInWorldSpace;
                pointToCamera.normalize();

                Vector3 half = pointToLight;
                half += pointToCamera;
                half.normalize();

                // use the interpolated phong normal!
                coord intensity2 = dotCUDA(half, phongNormal);
                if (intensity2>0.f) {
                    // intensity2^32 via 5 successive squarings (shininess exponent)
                    intensity2 *= intensity2;
                    intensity2 *= intensity2;
                    intensity2 *= intensity2;
                    intensity2 *= intensity2;
                    intensity2 *= intensity2;
                    dColor += Pixel(
                        (unsigned char)(SPECULAR*intensity2),
                        (unsigned char)(SPECULAR*intensity2),
                        (unsigned char)(SPECULAR*intensity2));
                }
            }
        }
        color += dColor;
    }

    // } for each light

    if (!doReflections)
        return color;
    else {
        originInWorldSpace = pointHitInWorldSpace;
        const Vector3& nrm = phongNormal;
        float c1 = -dotCUDA(rayInWorldSpace, nrm);

        // Reflections:
        //
        // ray = ray - 2 (ray dot normal) normal
        Vector3 reflectedRay = rayInWorldSpace;
        reflectedRay += nrm*(2.0f*c1);
        reflectedRay.normalize();

        return
            color
            /* use backface culling for reflection rays: <true> */
            + Raytrace<depth+1, doSpecular, doPhongInterp, doReflections, doShadows, true>(
                originInWorldSpace, reflectedRay, avoidSelf, pTriangles,
                cudaEyePosInWorldSpace, cudaLightPosInWorldSpace) * REFLECTIONS_RATE
            /* ...but not for refraction rays: <false> REMOVED, 2011/02/04 */
            ;
    }
}

// CUDA 1.2 has no recursion - I therefore use the magic of C++ templates:
// Compile-time recursion using the "depth" param!
//
// These are the template specializations that stop the compile-time recursion
// at MAX_RAY_DEPTH level.
// Compile-time recursion stoppers: CUDA 1.2-era hardware has no runtime
// recursion, so Raytrace<> recurses through its integer "depth" template
// parameter.  Each specialization below terminates that compile-time
// recursion at MAX_RAY_DEPTH by returning black, for one combination of the
// five boolean feature flags.
#define STOP_RECURSION(a,b,c,d,e) \
template <> \
__device__ Pixel Raytrace<MAX_RAY_DEPTH,a,b,c,d,e>( \
    Vector3 originInWorldSpace, Vector3 rayInWorldSpace, int avoidSelf, \
    Triangle *pTriangles, \
    Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace) \
{ \
    return Pixel(0.f,0.f,0.f); \
}

// Shorthand so the 2^5 = 32 specializations below stay readable.
#define f false
#define t true

STOP_RECURSION(f,f,f,f,f)
STOP_RECURSION(f,f,f,f,t)
STOP_RECURSION(f,f,f,t,f)
STOP_RECURSION(f,f,f,t,t)
STOP_RECURSION(f,f,t,f,f)
STOP_RECURSION(f,f,t,f,t)
STOP_RECURSION(f,f,t,t,f)
STOP_RECURSION(f,f,t,t,t)
STOP_RECURSION(f,t,f,f,f)
STOP_RECURSION(f,t,f,f,t)
STOP_RECURSION(f,t,f,t,f)
STOP_RECURSION(f,t,f,t,t)
STOP_RECURSION(f,t,t,f,f)
STOP_RECURSION(f,t,t,f,t)
STOP_RECURSION(f,t,t,t,f)
STOP_RECURSION(f,t,t,t,t)
STOP_RECURSION(t,f,f,f,f)
STOP_RECURSION(t,f,f,f,t)
STOP_RECURSION(t,f,f,t,f)
STOP_RECURSION(t,f,f,t,t)
STOP_RECURSION(t,f,t,f,f)
STOP_RECURSION(t,f,t,f,t)
STOP_RECURSION(t,f,t,t,f)
STOP_RECURSION(t,f,t,t,t)
STOP_RECURSION(t,t,f,f,f)
STOP_RECURSION(t,t,f,f,t)
STOP_RECURSION(t,t,f,t,f)
STOP_RECURSION(t,t,f,t,t)
STOP_RECURSION(t,t,t,f,f)
STOP_RECURSION(t,t,t,f,t)
STOP_RECURSION(t,t,t,t,f)
STOP_RECURSION(t,t,t,t,t)

#undef f
#undef t

// Main CUDA kernel, templated, to support each of the desired features:
//
// - using specular lights or not
// - doing Phong normal interpolation or not
// - doing reflections or not
// - doing shadows or not
// - doing anti-alias or not
//
// Launch layout: 1D grid of 1D blocks, one thread per screen pixel
// (idx in [0, MAXX*MAXY)); the grid is rounded up so trailing threads exit
// via the bounds check below.
template <bool doSpecular, bool doPhongInterp, bool doReflections, bool doShadows, bool antialias>
__global__ void CoreLoopTrianglesRaycaster(
    int *pixels,
    Matrix3 *cudaWorldToCameraSpace,
    Triangle *pTriangles,
    Vector3 *cudaEyePosInWorldSpace,
    Vector3 *cudaLightPosInWorldSpace,
    unsigned *cudaMortonTable)
{
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Guard: the launch rounds the grid up, so out-of-range threads bail out.
    if (idx>=MAXX*MAXY)
        return;

    // without MORTON_ORDER
    // int x = idx % MAXX;
    // int y = idx / MAXX;

    // This code will not allocate idx to a normal, left-to-right, top-to-bottom
    // allocation of pixels. Instead, it will use the pre-calculated Morton order
    // of pixels in screen space, to make threads end up reading the same (hopefully)
    // BVH and triangle data (more co-alesced, cache-friendly accesses).
    // Each table entry packs x in the low 16 bits and y in the high 16 bits.
    int x = int(cudaMortonTable[idx] & 0xFFFF);
    int y = int((cudaMortonTable[idx] & 0xFFFF0000)>>16);

    Pixel finalColor(0,0,0);
    int pixelsTraced = 1;
    if (antialias)
        pixelsTraced = 4;   // 4 sub-samples per pixel when anti-aliasing

    while(pixelsTraced--) {
        // We will shoot a ray in camera space (from Eye to the screen point, so in camera
        // space, from (0,0,0) to this:
        coord xx = (coord)x;
        coord yy = (coord)y;

        if (antialias) {
            // nudge in a cross pattern around the pixel center
            xx += 0.25f - .5f*(pixelsTraced&1);
            yy += 0.25f - .5f*((pixelsTraced&2)>>1);
        }
        coord lx = coord((MAXY/2)-yy)/SCREEN_DIST;
        coord ly = coord(xx-(MAXX/2))/SCREEN_DIST;
        coord lz = 1.0f;
        Vector3 rayInCameraSpace(lx,ly,lz);
        rayInCameraSpace.normalize();

        // We will need the origin in world space
        Vector3 originInWorldSpace = *cudaEyePosInWorldSpace;

        // We have a rayInCameraSpace, and we want to use the BVH, which was constructed
        // in World space, so we convert the ray in World space
        Vector3 rayInWorldSpace = cudaWorldToCameraSpace->_row1 * rayInCameraSpace._x;
        rayInWorldSpace += cudaWorldToCameraSpace->_row2 * rayInCameraSpace._y;
        rayInWorldSpace += cudaWorldToCameraSpace->_row3 * rayInCameraSpace._z;
        // in theory, this should not be required
        rayInWorldSpace.normalize();

        // Primary ray, level 0, and we want backface culling: <true>
        // avoidSelf = -1: a primary ray has no originating triangle to skip.
        finalColor += Raytrace<0, doSpecular, doPhongInterp, doReflections, doShadows, true>(
            originInWorldSpace, rayInWorldSpace, -1, pTriangles,
            cudaEyePosInWorldSpace, cudaLightPosInWorldSpace);
    }
    if (antialias)
        finalColor /= 4.f;   // average the 4 sub-samples

    // Clamp each channel before packing into the framebuffer integer.
    if (finalColor._r>255.0f) finalColor._r=255.0f;
    if (finalColor._g>255.0f) finalColor._g=255.0f;
    if (finalColor._b>255.0f) finalColor._b=255.0f;

    int color = getColor(finalColor);

    // without MORTON_ORDER
    //pixels[idx] = color;

    // Write back through the de-Mortonized (x,y) so the framebuffer stays
    // in normal raster order even though threads are Morton-ordered.
    pixels[y*MAXX+x] = color;
}

// The bridge to the normal C++ world: templated,
// ... to include only the mode-specific code in each incantation
bool g_bFirstTime = true;   // one-shot flag: texture binding happens on the first call only

// Host-side render entry point: on the first call binds the scene data
// (triangle index list, BVH limits/indexes, vertices, triangle intersection
// data) to texture references, then maps the GL pixel-buffer object, launches
// either the vertex point-cloud kernel or the templated raycasting kernel,
// and finally draws the PBO as a full-screen textured quad.
// NOTE(review): 'tex', 'buffer', 'g_bUsePoints', the g_*Texture references
// and the g_*No counters are globals declared elsewhere in this project.
void CudaRender(
    Matrix3 *cudaWorldToCameraSpace,
    Vertex *cudaPtrVertices, Triangle *cudaPtrTriangles,
    float *cudaTriangleIntersectionData,
    int *cudaTriIdxList,
    float *cudaBVHlimits, int *cudaBVHindexesOrTrilists,
    Vector3 *cudaEyePosInWorldSpace, Vector3 *cudaLightPosInWorldSpace,
    unsigned *cudaMortonTable)
{
    if (g_bFirstTime) {
        // The first time any of the CudaRender templates is called,
        // bind the texture data!
        g_bFirstTime = false;

        // NOTE(review): cudaBindTexture / texture references are deprecated in
        // modern CUDA toolkits; texture objects are the current replacement.
        cudaChannelFormatDesc channel1desc = cudaCreateChannelDesc<uint1>();
        cudaBindTexture(NULL, &g_triIdxListTexture, cudaTriIdxList, &channel1desc,
                        g_triIndexListNo*sizeof(uint1));

        cudaChannelFormatDesc channel2desc = cudaCreateChannelDesc<float2>();
        cudaBindTexture(NULL, &g_pCFBVHlimitsTexture, cudaBVHlimits, &channel2desc,
                        g_pCFBVH_No*6*sizeof(float));

        cudaChannelFormatDesc channel3desc = cudaCreateChannelDesc<int4>();
        cudaBindTexture(NULL, &g_pCFBVHindexesOrTrilistsTexture, cudaBVHindexesOrTrilists, &channel3desc,
                        g_pCFBVH_No*sizeof(uint4));

        cudaChannelFormatDesc channel4desc = cudaCreateChannelDesc<float4>();
        cudaBindTexture(NULL, &g_verticesTexture, cudaPtrVertices, &channel4desc,
                        g_verticesNo*8*sizeof(float));

        cudaChannelFormatDesc channel5desc = cudaCreateChannelDesc<float4>();
        cudaBindTexture(NULL, &g_trianglesTexture, cudaTriangleIntersectionData, &channel5desc,
                        g_trianglesNo*20*sizeof(float));
    }

    int *pixels;
    glBindTexture(GL_TEXTURE_2D, tex);
    // Map the GL pixel-buffer object so CUDA kernels can write the frame in place.
    SAFE(cudaGLMapBufferObject((void**)&pixels, buffer));

    if (g_bUsePoints) {
        cudaMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
        int blocksVertices = (g_verticesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        CoreLoopVertices<<< blocksVertices, THREADS_PER_BLOCK >>>(
            pixels, cudaWorldToCameraSpace, cudaEyePosInWorldSpace);
        /*
        cudaMemset(pixels, 0x40, MAXX*MAXY*sizeof(unsigned)); // Clear all pixels to ambient
        int blocksTriangles = (g_trianglesNo + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        CoreLoopTriangles<<< blocksTriangles, THREADS_PER_BLOCK >>>(
            pixels, cudaWorldToCameraSpace, cudaPtrTriangles, cudaEyePosInWorldSpace);*/
    } else {
        int blockPixels = (MAXY*MAXX + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
        DBG_PUTS(1, "CoreLoopTrianglesRaycaster begins");

// Expands to one fully-specialized kernel launch per feature combination.
#define PAINT(bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias) \
        CoreLoopTrianglesRaycaster<bDoSpecular,bDoPhongInterp,bDoReflections,bDoShadows,bDoAntialias> \
            <<< blockPixels, THREADS_PER_BLOCK >>>( \
                pixels, \
                cudaWorldToCameraSpace, \
                cudaPtrTriangles, \
                cudaEyePosInWorldSpace, cudaLightPosInWorldSpace, \
                cudaMortonTable);

        // Being templated, one can use the CoreLoopTrianglesRaycaster in any form one wants.
        // In theory, C++ compilers don't need this expansion (which I wrote with a simple Python script)
        // Unfortunately, we can't pass runtime vars in template params, not even when they are enumerants
        // or booleans...
        PAINT( true , true , true , true , true )
    }

    // Catch launch-configuration errors from the kernel launch above.
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // NOTE(review): cudaThreadSynchronize() is a deprecated alias of
    // cudaDeviceSynchronize(); it also surfaces asynchronous kernel errors.
    SAFE(cudaThreadSynchronize());
    SAFE(cudaGLUnmapBufferObject(buffer));

    // Use OpenGL texture to display the generated frame at lightning speed
    // (the PBO buffer is already on the card, no useless PCI bus criss-cross)
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, MAXX, MAXY, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    glEnable(GL_TEXTURE_2D);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, -1.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(1.0f, -1.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(1.0f, 1.0f);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, 1.0f);
    glEnd();

    // hack:
    extern bool g_benchmark;
    if (!g_benchmark) {
        // Display the "Press H for help" message
        glDisable(GL_LIGHTING);
        glDisable(GL_TEXTURE_2D);
        glColor3f(1.f, 1.f, 1.f);
        glRasterPos2f(-0.95, 0.9);
        const char *help = "Press H for help";
        for(unsigned o=0;o<strlen(help); o++)
            glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, help[o]);
    }
    SDL_GL_SwapBuffers();
}
the_stack
#if ( MODEL == HYDRO && defined GRAVITY )

// include c_ExtAcc_AuxArray[]
#ifdef __CUDACC__
#include "CUDA_ConstMemory.h"
#endif

//-----------------------------------------------------------------------------------------
// Function    : CPU/CUPOT_HydroGravitySolver
// Description : Advances the momentum and energy density of a group of patches by gravitational acceleration
//               (including external gravity)
//
// Note        : 1. Currently this function does NOT ensure the consistency between internal energy and
//                  dual-energy variable (e.g., entropy)
//                  --> This consistency breaks only for cells with the dual-energy status labelled
//                      as DE_UPDATED_BY_ETOT_GRA
//                  --> We restore this consistency in Gra_Close()
//               2. Arrays with a prefix "g_" are stored in the global memory of GPU
//
// Parameter   : g_Flu_Array_New   : Array to store the input and output fluid variables
//               g_Pot_Array_New   : Array storing the input potential (at the current step)
//                                   --> _New: to be distinguishable from g_Pot_Array_USG[], which is defined
//                                       at the previous step
//               g_Corner_Array    : Array storing the physical corner coordinates of each patch
//               g_Pot_Array_USG   : Array storing the input potential for UNSPLIT_GRAVITY (at the previous step)
//               g_Flu_Array_USG   : Array storing the input density + momentum for UNSPLIT_GRAVITY
//                                   (at the previous step)
//               g_DE_Array        : Array storing the dual-energy status (for both input and output)
//               g_Emag_Array      : Array storing the cell-centered magnetic energy
//                                   --> Only for checking minimum internal energy in MHD
//               NPatchGroup       : Number of input patch groups (for CPU only)
//               dt                : Time interval to advance solution
//               dh                : Cell size
//               P5_Gradient       : Use 5-points stencil to evaluate the potential gradient
//               UsePot            : Add self-gravity and/or external potential
//               ExtAcc            : Add external acceleration
//               ExtAcc_Func       : Function pointer to the external acceleration routine (for both CPU and GPU)
//               c_ExtAcc_AuxArray : Auxiliary array for adding external acceleration (for CPU only)
//                                   --> When using GPU, this array is stored in the constant memory header
//                                       CUDA_ConstMemory.h and does not need to be passed as a function argument
//               TimeNew           : Physical time at the current step (for the external gravity solver)
//               TimeOld           : Physical time at the previous step (for the external gravity solver
//                                   in UNSPLIT_GRAVITY)
//               MinEint           : Internal energy floor
//
// Return      : g_Flu_Array_New, g_DE_Array
//-----------------------------------------------------------------------------------------
#ifdef __CUDACC__
__global__
void CUPOT_HydroGravitySolver(
   real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
   const real g_Pot_Array_New[][ CUBE(GRA_NXT) ],
   const double g_Corner_Array [][3],
   const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ],
   const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
   char g_DE_Array [][ CUBE(PS1) ],
   const real g_Emag_Array [][ CUBE(PS1) ],
   const real dt, const real dh, const bool P5_Gradient,
   const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
   const double TimeNew, const double TimeOld, const real MinEint )
#else
void CPU_HydroGravitySolver(
   real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
   const real g_Pot_Array_New[][ CUBE(GRA_NXT) ],
   const double g_Corner_Array [][3],
   const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ],
   const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
   char g_DE_Array [][ CUBE(PS1) ],
   const real g_Emag_Array [][ CUBE(PS1) ],
   const int NPatchGroup,
   const real dt, const real dh, const bool P5_Gradient,
   const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
   const double c_ExtAcc_AuxArray[],
   const double TimeNew, const double TimeOld, const real MinEint )
#endif
{

// check
#  ifdef GAMER_DEBUG
   if ( ExtAcc && TimeNew < 0.0 )
      printf( "ERROR : incorrect TimeNew (%14.7e) !!\n", TimeNew );

#  ifdef UNSPLIT_GRAVITY
   if ( g_Flu_Array_USG == NULL )
      printf( "ERROR : g_Flu_Array_USG == NULL !!\n" );

   if ( UsePot && g_Pot_Array_USG == NULL )
      printf( "ERROR : g_Pot_Array_USG == NULL !!\n" );

   if ( ExtAcc && ( TimeOld >= TimeNew || TimeOld < 0.0 ) )
      printf( "ERROR : incorrect time (TimeOld %14.7e, TimeNew = %14.7e) !!\n", TimeOld, TimeNew );
#  endif

#  ifdef DUAL_ENERGY
   if ( g_DE_Array == NULL )
      printf( "ERROR : g_DE_Array == NULL !!\n" );
#  endif

#  ifdef MHD
   if ( g_Emag_Array == NULL )
      printf( "ERROR : g_Emag_Array == NULL !!\n" );
#  endif
#  endif // #ifdef GAMER_DEBUG


// finite-difference coefficient of the gradient, pre-multiplied by -dt
   const real Gra_Const = ( P5_Gradient ) ? -dt/(12.0*dh) : -dt/(2.0*dh);
   const int PS1_sqr = SQR(PS1);
   const int didx_new[3] = { 1, GRA_NXT, SQR(GRA_NXT) };
#  ifdef UNSPLIT_GRAVITY
   const int didx_old[3] = { 1, USG_NXT_G, SQR(USG_NXT_G) };
#  endif


// load potential from global to shared memory to improve the GPU performance
#  ifdef __CUDACC__
   __shared__ real s_pot_new[ CUBE(GRA_NXT) ];
#  ifdef UNSPLIT_GRAVITY
   __shared__ real s_pot_old[ CUBE(USG_NXT_G) ];
#  endif

   if ( UsePot )
   {
      // strided copy: thread t handles elements t, t+GRA_BLOCK_SIZE, ...
      // NOTE(review): assumes blockDim.x == GRA_BLOCK_SIZE -- confirm at launch site.
      for (int t=threadIdx.x; t<CUBE(GRA_NXT); t+=GRA_BLOCK_SIZE)
         s_pot_new[t] = g_Pot_Array_New[blockIdx.x][t];
#  ifdef UNSPLIT_GRAVITY
      for (int t=threadIdx.x; t<CUBE(USG_NXT_G); t+=GRA_BLOCK_SIZE)
         s_pot_old[t] = g_Pot_Array_USG[blockIdx.x][t];
#  endif
   }
   // barrier is outside "if (UsePot)" so every thread in the block reaches it
   __syncthreads();
#  endif // #ifdef __CUDACC__


// loop over all patches
// --> CPU/GPU solver: use different (OpenMP threads) / (CUDA thread blocks)
//     to work on different patches
#  ifdef __CUDACC__
   const int P = blockIdx.x;
#  else
#  pragma omp parallel for schedule( runtime )
   for (int P=0; P<NPatchGroup*8; P++)
#  endif
   {
      // point to the potential array of the target patch
#  ifdef __CUDACC__
      const real *const pot_new = s_pot_new;
#  ifdef UNSPLIT_GRAVITY
      const real *const pot_old = s_pot_old;
#  endif
#  else // #ifdef __CUDACC__
      const real *const pot_new = g_Pot_Array_New[P];
#  ifdef UNSPLIT_GRAVITY
      const real *const pot_old = g_Pot_Array_USG[P];
#  endif
#  endif // #ifdef __CUDACC__ ... else ...

      // loop over all cells of the target patch
      // _g0: indices for the arrays without any ghost zone
      CGPU_LOOP( idx_g0, CUBE(PS1) )
      {
         // Enki = non-kinetic energy (i.e. Etot - Ekin)
         real acc_new[3]={0.0, 0.0, 0.0}, px_new, py_new, pz_new, rho_new, Enki_in, Ekin_out, Etot_in, Etot_out, _rho2;
#  ifdef UNSPLIT_GRAVITY
         real acc_old[3]={0.0, 0.0, 0.0}, px_old, py_old, pz_old, rho_old, Emag_in=0.0;
#  endif

         // decompose the 1D cell index into (i,j,k) within the ghost-free patch
         const int i_g0 = idx_g0 % PS1;
         const int j_g0 = idx_g0 % PS1_sqr / PS1;
         const int k_g0 = idx_g0 / PS1_sqr;

         // shift by the ghost-zone width to index the potential arrays
         const int i_new = i_g0 + GRA_GHOST_SIZE;
         const int j_new = j_g0 + GRA_GHOST_SIZE;
         const int k_new = k_g0 + GRA_GHOST_SIZE;
         const int idx_new = IDX321( i_new, j_new, k_new, GRA_NXT, GRA_NXT );

#  ifdef UNSPLIT_GRAVITY
         const int i_old = i_g0 + USG_GHOST_SIZE_G;
         const int j_old = j_g0 + USG_GHOST_SIZE_G;
         const int k_old = k_g0 + USG_GHOST_SIZE_G;
         const int idx_old = IDX321( i_old, j_old, k_old, USG_NXT_G, USG_NXT_G );
#  endif

         // external acceleration
         if ( ExtAcc )
         {
            double x, y, z;

            x = g_Corner_Array[P][0] + (double)(i_g0*dh);
            y = g_Corner_Array[P][1] + (double)(j_g0*dh);
            z = g_Corner_Array[P][2] + (double)(k_g0*dh);

            // acc_* accumulate dt*acceleration (i.e. a velocity increment)
            ExtAcc_Func( acc_new, x, y, z, TimeNew, c_ExtAcc_AuxArray );
            for (int d=0; d<3; d++) acc_new[d] *= dt;

#  ifdef UNSPLIT_GRAVITY
            ExtAcc_Func( acc_old, x, y, z, TimeOld, c_ExtAcc_AuxArray );
            for (int d=0; d<3; d++) acc_old[d] *= dt;
#  endif
         }

         // self-gravity and external potential
         if ( UsePot )
         {
            const int ip1_new = idx_new + didx_new[0];
            const int jp1_new = idx_new + didx_new[1];
            const int kp1_new = idx_new + didx_new[2];
            const int im1_new = idx_new - didx_new[0];
            const int jm1_new = idx_new - didx_new[1];
            const int km1_new = idx_new - didx_new[2];

#  ifdef UNSPLIT_GRAVITY
            const int ip1_old = idx_old + didx_old[0];
            const int jp1_old = idx_old + didx_old[1];
            const int kp1_old = idx_old + didx_old[2];
            const int im1_old = idx_old - didx_old[0];
            const int jm1_old = idx_old - didx_old[1];
            const int km1_old = idx_old - didx_old[2];
#  endif

            if ( P5_Gradient )  // 5-point stencil
            {
               const real Const_8 = (real)8.0;
               const int ip2_new = ip1_new + didx_new[0];
               const int jp2_new = jp1_new + didx_new[1];
               const int kp2_new = kp1_new + didx_new[2];
               const int im2_new = im1_new - didx_new[0];
               const int jm2_new = jm1_new - didx_new[1];
               const int km2_new = km1_new - didx_new[2];

#  ifdef UNSPLIT_GRAVITY
               const int ip2_old = ip1_old + didx_old[0];
               const int jp2_old = jp1_old + didx_old[1];
               const int kp2_old = kp1_old + didx_old[2];
               const int im2_old = im1_old - didx_old[0];
               const int jm2_old = jm1_old - didx_old[1];
               const int km2_old = km1_old - didx_old[2];
#  endif

               acc_new[0] += Gra_Const*( - pot_new[ip2_new] + Const_8*pot_new[ip1_new] - Const_8*pot_new[im1_new] + pot_new[im2_new] );
               acc_new[1] += Gra_Const*( - pot_new[jp2_new] + Const_8*pot_new[jp1_new] - Const_8*pot_new[jm1_new] + pot_new[jm2_new] );
               acc_new[2] += Gra_Const*( - pot_new[kp2_new] + Const_8*pot_new[kp1_new] - Const_8*pot_new[km1_new] + pot_new[km2_new] );

#  ifdef UNSPLIT_GRAVITY
               acc_old[0] += Gra_Const*( - pot_old[ip2_old] + Const_8*pot_old[ip1_old] - Const_8*pot_old[im1_old] + pot_old[im2_old] );
               acc_old[1] += Gra_Const*( - pot_old[jp2_old] + Const_8*pot_old[jp1_old] - Const_8*pot_old[jm1_old] + pot_old[jm2_old] );
               acc_old[2] += Gra_Const*( - pot_old[kp2_old] + Const_8*pot_old[kp1_old] - Const_8*pot_old[km1_old] + pot_old[km2_old] );
#  endif
            } // if ( P5_Gradient )

            else  // 3-point stencil
            {
               acc_new[0] += Gra_Const*( pot_new[ip1_new] - pot_new[im1_new] );
               acc_new[1] += Gra_Const*( pot_new[jp1_new] - pot_new[jm1_new] );
               acc_new[2] += Gra_Const*( pot_new[kp1_new] - pot_new[km1_new] );

#  ifdef UNSPLIT_GRAVITY
               acc_old[0] += Gra_Const*( pot_old[ip1_old] - pot_old[im1_old] );
               acc_old[1] += Gra_Const*( pot_old[jp1_old] - pot_old[jm1_old] );
               acc_old[2] += Gra_Const*( pot_old[kp1_old] - pot_old[km1_old] );
#  endif
            } // if ( P5_Gradient ) ... else ...
         } // if ( UsePot )


         // advance fluid
#  ifdef UNSPLIT_GRAVITY
         rho_new = g_Flu_Array_New[P][DENS][idx_g0];
         rho_old = g_Flu_Array_USG[P][DENS][idx_g0];
         px_new = g_Flu_Array_New[P][MOMX][idx_g0];
         px_old = g_Flu_Array_USG[P][MOMX][idx_g0];
         py_new = g_Flu_Array_New[P][MOMY][idx_g0];
         py_old = g_Flu_Array_USG[P][MOMY][idx_g0];
         pz_new = g_Flu_Array_New[P][MOMZ][idx_g0];
         pz_old = g_Flu_Array_USG[P][MOMZ][idx_g0];

         // backup the original non-kinetic energy so that we can restore it later if necessary
         _rho2 = (real)0.5/rho_new;
         Etot_in = g_Flu_Array_New[P][ENGY][idx_g0];
         Enki_in = Etot_in - _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );
#  ifdef MHD
         Emag_in = g_Emag_Array[P][idx_g0];
#  endif

         // update the momentum density (time-centered: average of old/new accelerations)
         px_new += (real)0.5*( rho_old*acc_old[0] + rho_new*acc_new[0] );
         py_new += (real)0.5*( rho_old*acc_old[1] + rho_new*acc_new[1] );
         pz_new += (real)0.5*( rho_old*acc_old[2] + rho_new*acc_new[2] );

         g_Flu_Array_New[P][MOMX][idx_g0] = px_new;
         g_Flu_Array_New[P][MOMY][idx_g0] = py_new;
         g_Flu_Array_New[P][MOMZ][idx_g0] = pz_new;

         // record the updated kinematic energy density
         Ekin_out = _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );

         // update the total energy density
#  ifdef DUAL_ENERGY
         // for the unsplitting method with the dual-energy formalism, we correct the **total energy density**
         // only if the dual-energy status != DE_UPDATED_BY_DUAL
         // --> for (a) DE_UPDATED_BY_DUAL     --> Eint has been updated by the dual-energy variable
         //         (b) DE_UPDATED_BY_MIN_PRES --> Eint has been set to the minimum threshold
         // --> currently for (b) we still update the total energy density
         if ( g_DE_Array[P][idx_g0] == DE_UPDATED_BY_DUAL )
         {
            // fix the internal energy and the dual-energy variable
            Etot_out = Enki_in + Ekin_out;
         }
         else
         {
            // update the total energy, where internal energy and dual-energy variable may change as well
            Etot_out = Etot_in + (real)0.5*( px_old*acc_old[0] + py_old*acc_old[1] + pz_old*acc_old[2] +
                                             px_new*acc_new[0] + py_new*acc_new[1] + pz_new*acc_new[2] );

            // check the minimum internal energy
            //###NOTE: assuming Etot = Eint + Ekin + Emag
            // (a) if the updated internal energy is greater than the threshold,
            //     set the dual-energy status == DE_UPDATED_BY_ETOT_GRA
            if ( Etot_out - Ekin_out - Emag_in >= MinEint )
               g_DE_Array[P][idx_g0] = DE_UPDATED_BY_ETOT_GRA;
            // (b) otherwise restore the original internal energy and keep the original dual-energy status
            else
               Etot_out = Enki_in + Ekin_out;
         }

#  else // # ifdef DUAL_ENERGY

         // for the unsplitting method without the dual-energy formalism, we always correct the total energy density
         // instead of the kinematic energy density
         // --> internal energy may change
         // --> we must check the minimum internal energy after this update
         Etot_out = Etot_in + (real)0.5*( px_old*acc_old[0] + py_old*acc_old[1] + pz_old*acc_old[2] +
                                          px_new*acc_new[0] + py_new*acc_new[1] + pz_new*acc_new[2] );

         // check the minimum internal energy
         // --> restore the original internal energy if the updated value becomes smaller than the threshold
         if ( Etot_out - Ekin_out - Emag_in < MinEint )
            Etot_out = Enki_in + Ekin_out;

#  endif // #ifdef DUAL_ENERGY ... else ...

#  else // #ifdef UNSPLIT_GRAVITY

         rho_new = g_Flu_Array_New[P][DENS][idx_g0];
         px_new = g_Flu_Array_New[P][MOMX][idx_g0];
         py_new = g_Flu_Array_New[P][MOMY][idx_g0];
         pz_new = g_Flu_Array_New[P][MOMZ][idx_g0];

         // backup the original internal energy so that we can restore it later
         _rho2 = (real)0.5/rho_new;
         Etot_in = g_Flu_Array_New[P][ENGY][idx_g0];
         Enki_in = Etot_in - _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );

         // update the momentum density
         px_new += rho_new*acc_new[0];
         py_new += rho_new*acc_new[1];
         pz_new += rho_new*acc_new[2];

         g_Flu_Array_New[P][MOMX][idx_g0] = px_new;
         g_Flu_Array_New[P][MOMY][idx_g0] = py_new;
         g_Flu_Array_New[P][MOMZ][idx_g0] = pz_new;

         // for the splitting method, we ensure that the internal energy is unchanged
         Ekin_out = _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );
         Etot_out = Enki_in + Ekin_out;

#  endif // #ifdef UNSPLIT_GRAVITY ... else ...

         // store the updated total energy density to the output array
         g_Flu_Array_New[P][ENGY][idx_g0] = Etot_out;

      } // CGPU_LOOP( idx_g0, CUBE(PS1) )
   } // for (int P=0; P<NPatchGroup*8; P++)

} // FUNCTION : CPU/CUPOT_HydroGravitySolver

#endif // #if ( MODEL == HYDRO && defined GRAVITY )
the_stack
#pragma once

#include <gunrock/app/problem_base.cuh>

namespace gunrock {
namespace app {
namespace lp {

/**
 * @brief Specifying parameters for LP Problem
 * @param parameters The util::Parameter<...> structure holding all parameter
 * info \return cudaError_t error message(s), if any
 */
// NOTE(review): defined (not just declared) in a header without 'inline' --
// multiple translation units including this file would violate the ODR; confirm
// this header is included once only.
cudaError_t UseParameters_problem(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;

  GUARD_CU(gunrock::app::UseParameters_problem(parameters));

  return retval;
}

/**
 * @brief Label Propagation Problem structure
 * @tparam _GraphT Type of the graph
 * @tparam _LabelT Type of labels used in sssp
 * @tparam _ValueT Type of per-vertex distance values
 * @tparam _FLAG Problem flags
 */
template <typename _GraphT, typename _LabelT = typename _GraphT::VertexT,
          typename _ValueT = typename _GraphT::ValueT,
          ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
  typedef _GraphT GraphT;
  static const ProblemFlag FLAG = _FLAG;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  typedef typename GraphT::CsrT CsrT;
  typedef typename GraphT::CscT CscT;
  typedef typename GraphT::GpT GpT;
  typedef _LabelT LabelT;
  typedef _ValueT ValueT;
  typedef unsigned char MaskT;

  typedef ProblemBase<GraphT, FLAG> BaseProblem;
  typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;

  // Helper structures

  /**
   * @brief Data slice structure containing LP-specific data on individual GPU
   */
  struct DataSlice : BaseDataSlice {
    util::Array1D<SizeT, LabelT> labels;      // labels (in the current iteration) for each vertex
    util::Array1D<SizeT, LabelT> old_labels;  // labels (in the previous iteration) for each vertex
    util::Array1D<SizeT, SizeT> vertex_markers[2];
    util::Array1D<SizeT, SizeT, util::PINNED> split_lengths;
    util::Array1D<SizeT, VertexT> local_vertices;
    util::Array1D<SizeT, MaskT> visited_masks;
    util::Array1D<SizeT, MaskT> old_mask;
    util::Array1D<SizeT, VertexT> unvisited_vertices[2];
    util::Array1D<SizeT, MaskT *> in_masks;
    util::Array1D<SizeT, LabelT> neighbour_labels;
    util::Array1D<SizeT, int> neighbour_labels_size;
    // segments_temp stores the relative segments, and segments stores the
    // (cumulative scan) absolute segments
    util::Array1D<SizeT, int> segments;
    util::Array1D<SizeT, int> segments_temp;
    util::Array1D<SizeT, int> segments_size;
    util::Array1D<uint64_t, char> cub_temp_storage;
    SizeT num_visited_vertices, num_unvisited_vertices;
    bool been_in_backward;
    util::Array1D<SizeT, int> visited;

    /*
     * @brief Default constructor
     */
    DataSlice() : BaseDataSlice() {
      labels.SetName("labels");
      old_labels.SetName("old_labels");
      vertex_markers[0].SetName("vertex_markers[0]");
      vertex_markers[1].SetName("vertex_markers[1]");
      unvisited_vertices[0].SetName("unvisited_vertices[0]");
      unvisited_vertices[1].SetName("unvisited_vertices[1]");
      local_vertices.SetName("local_vertices");
      // NOTE(review): name string "split_length" does not match the member
      // name "split_lengths" -- confirm intended.
      split_lengths.SetName("split_length");
      visited_masks.SetName("visited_masks");
      old_mask.SetName("old_mask");
      in_masks.SetName("in_masks");
      neighbour_labels.SetName("neighbour_labels");
      neighbour_labels_size.SetName("neighbour_labels_size");
      segments.SetName("segments");
      segments_size.SetName("segments_size");
      segments_temp.SetName("segments_temp");
      cub_temp_storage.SetName("cub_temp_storage");
      visited.SetName("visited");
    }

    /*
     * @brief Default destructor
     */
    virtual ~DataSlice() { Release(); }

    /*
     * @brief Releasing allocated memory space
     * @param[in] target The location to release memory from
     * \return cudaError_t Error message(s), if any
     */
    cudaError_t Release(util::Location target = util::LOCATION_ALL) {
      cudaError_t retval = cudaSuccess;
      if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));

      GUARD_CU(visited.Release(target));
      GUARD_CU(labels.Release(target));
      GUARD_CU(old_labels.Release(target));
      GUARD_CU(vertex_markers[0].Release(target));
      GUARD_CU(vertex_markers[1].Release(target));
      GUARD_CU(split_lengths.Release(target));
      GUARD_CU(local_vertices.Release(target));
      GUARD_CU(visited_masks.Release(target));
      GUARD_CU(unvisited_vertices[0].Release(target));
      GUARD_CU(unvisited_vertices[1].Release(target));
      GUARD_CU(old_mask.Release(target));
      GUARD_CU(in_masks.Release(target));
      // NOTE(review): in_masks.Release() is invoked twice in a row -- almost
      // certainly a copy-paste duplicate; harmless only if Release() is
      // idempotent.
      GUARD_CU(in_masks.Release(target));
      GUARD_CU(neighbour_labels.Release(target));
      GUARD_CU(neighbour_labels_size.Release(target));
      GUARD_CU(segments_size.Release(target));
      GUARD_CU(segments.Release(target));
      GUARD_CU(segments_temp.Release(target));
      GUARD_CU(cub_temp_storage.Release(target));
      GUARD_CU(BaseDataSlice ::Release(target));
      return retval;
    }

    /**
     * @brief initializing lp-specific data on each gpu
     * @param sub_graph Sub graph on the GPU.
     * @param[in] num_gpus Number of GPUs
     * @param[in] gpu_idx GPU device index
     * @param[in] target Targeting device location
     * @param[in] flag Problem flag containling options
     * \return cudaError_t Error message(s), if any
     */
    cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0,
                     util::Location target = util::DEVICE,
                     ProblemFlag flag = Problem_None) {
      cudaError_t retval = cudaSuccess;

      GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
      GUARD_CU(labels.Allocate(sub_graph.nodes, target));
      GUARD_CU(old_labels.Allocate(sub_graph.nodes, target));
      GUARD_CU(segments.Allocate(sub_graph.nodes, target));
      GUARD_CU(segments_temp.Allocate(sub_graph.nodes, target));
      // scalar counters: allocated with a single element each
      GUARD_CU(segments_size.Allocate(1, target));
      GUARD_CU(neighbour_labels_size.Allocate(1, util::DEVICE | util::HOST));
      GUARD_CU(cub_temp_storage.Allocate(1, target));
      GUARD_CU(visited.Allocate(sub_graph.nodes, target));
      GUARD_CU(neighbour_labels.Allocate(sub_graph.edges+1, target));
      GUARD_CU(split_lengths.Allocate(2, util::HOST | target));

      if (num_gpus > 1) {
        // gather the vertices owned by this GPU (partition id 0 == local)
        SizeT local_counter = 0;
        for (VertexT v = 0; v < sub_graph.nodes; v++)
          if (sub_graph.GpT::partition_table[v] == 0) local_counter++;
        GUARD_CU(local_vertices.Allocate(local_counter, util::HOST | target));

        local_counter = 0;
        for (VertexT v = 0; v < sub_graph.nodes; v++) {
          if (sub_graph.GpT::partition_table[v] == 0) {
            local_vertices[local_counter] = v;
            local_counter++;
          }
        }
        GUARD_CU(local_vertices.Move(util::HOST, target));
      }

      GUARD_CU(sub_graph.Move(util::HOST, target, this->stream));
      return retval;
    }  // end of Init

    /**
     * @brief Reset problem function. Must be called prior to each run.
     * @param[in] src Source vertex to start.
     * @param[in] location Memory location to work on
     * \return cudaError_t Error message(s), if any
     */
    // NOTE(review): 'src' is never used in this body, and Problem::Reset below
    // calls this with a util::Location as the first argument -- verify the
    // intended signature.
    cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) {
      cudaError_t retval = cudaSuccess;
      SizeT nodes = this->sub_graph->nodes;
      been_in_backward = false;

      GUARD_CU(util::SetDevice(this->gpu_idx));

      // Allocate output labels if necessary
      GUARD_CU(labels.EnsureSize_(nodes, target));
      GUARD_CU(visited.EnsureSize_(nodes, target));
      GUARD_CU(visited.ForEach(
          [] __host__ __device__(int &x) { x = (int)0; }, nodes, target,
          this->stream));
      // NOTE(review): neighbour_labels_size and segments_size were allocated
      // with a single element in Init(), yet ForEach is called with length
      // 'nodes' here -- possible out-of-bounds; verify against
      // util::Array1D::ForEach semantics.
      GUARD_CU(neighbour_labels_size.ForEach(
          [] __host__ __device__(int &x) { x = (int)0; }, nodes, target,
          this->stream));
      GUARD_CU(segments_size.ForEach(
          [] __host__ __device__(int &x) { x = (int)0; }, nodes, target,
          this->stream));
      return retval;
    }  // end of Reset
  };   // end of DataSlice

  // Members

  // Set of data slices (one for each GPU)
  util::Array1D<SizeT, DataSlice> *data_slices;

  // Methods

  /**
   * @brief LPProblem default constructor
   */
  Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
      : BaseProblem(_parameters, _flag), data_slices(NULL) {}

  /**
   * @brief LPProblem default destructor
   */
  virtual ~Problem() { Release(); }

  /*
   * @brief Releasing allocated memory space
   * @param[in] target The location to release memory from
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Release(util::Location target = util::LOCATION_ALL) {
    cudaError_t retval = cudaSuccess;
    if (data_slices == NULL) return retval;
    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      GUARD_CU(data_slices[gpu].Release(target));
    }

    if ((target & util::HOST) != 0 &&
        data_slices[0].GetPointer(util::DEVICE) == NULL) {
      delete[] data_slices;
      data_slices = NULL;
    }
    GUARD_CU(BaseProblem::Release(target));
    return retval;
  }

  /**
   * \addtogroup PublicInterface
   * @{
   */

  /**
   * @brief Copy result labels computed on the GPU back to host-side vectors.
   * @param[out] h_labels Host array to store computed vertex labels
   * @param[in] target where the results are stored
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Extract(LabelT *h_labels,
                      util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    SizeT nodes = this->org_graph->nodes;

    if (this->num_gpus == 1) {
      auto &data_slice = data_slices[0][0];

      if (target == util::DEVICE) {
        // Set device
        GUARD_CU(util::SetDevice(this->gpu_idx[0]));
        // retarget the array at the caller's buffer, then copy device -> host
        GUARD_CU(data_slice.labels.SetPointer(h_labels, nodes, util::HOST));
        GUARD_CU(data_slice.labels.Move(util::DEVICE, util::HOST));
      } else if (target == util::HOST) {
        GUARD_CU(data_slice.labels.ForAll(
            [h_labels] __host__ __device__(const LabelT *labels,
                                           const VertexT &v) {
              h_labels[v] = labels[v];
            },
            nodes, util::HOST));
      }
    } else {  // num_gpus != 1
      // gather per-GPU label arrays on the host, then stitch them together
      // through the partition/conversion tables
      util::Array1D<SizeT, LabelT *> th_labels;
      th_labels.SetName("lp::Problem::Extract::th_labels");
      GUARD_CU(th_labels.Allocate(this->num_gpus, util::HOST));

      for (int gpu = 0; gpu < this->num_gpus; gpu++) {
        auto &data_slice = data_slices[gpu][0];
        if (target == util::DEVICE) {
          GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
          GUARD_CU(data_slice.labels.Move(util::DEVICE, util::HOST));
        }
        th_labels[gpu] = data_slice.labels.GetPointer(util::HOST);
      }  // end for(gpu)

      for (VertexT v = 0; v < nodes; v++) {
        int gpu = this->org_graph->GpT::partition_table[v];
        VertexT v_ = v;
        if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) == 0)
          v_ = this->org_graph->GpT::convertion_table[v];

        h_labels[v] = th_labels[gpu][v_];
      }

      GUARD_CU(th_labels.Release());
    }  // end if (num_gpus ==1)

    return retval;
  }

  /**
   * @brief initialization function.
   * @param graph The graph that LP processes on
   * @param[in] Location Memory location to work on
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    GUARD_CU(BaseProblem::Init(graph, target));
    data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];

    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
      if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));

      GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));

      auto &data_slice = data_slices[gpu][0];
      GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
                               this->gpu_idx[gpu], target, this->flag));
    }  // end for(gpu)

    return retval;
  }

  /**
   * @brief Reset problem function. Must be called prior to each run.
   * @param[in] src Source vertex to start.
   * @param[in] location Memory location to work on
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;

    for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
      // Set device
      if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
      // NOTE(review): DataSlice::Reset takes (src, target); passing 'target'
      // here binds it to the 'src' parameter -- confirm intended.
      GUARD_CU(data_slices[gpu]->Reset(target));
      GUARD_CU(data_slices[gpu].Move(util::HOST, target));
    }

    // Fillin the initial input_queue
    int gpu;
    VertexT src_;
    if (this->num_gpus <= 1) {
      gpu = 0;
      src_ = src;
    } else {
      // locate the GPU owning 'src' and translate to its local vertex id
      gpu = this->org_graph->partition_table[src];
      if (this->flag & partitioner::Keep_Node_Num)
        src_ = src;
      else
        src_ = this->org_graph->GpT::convertion_table[src];
    }

    if (target & util::HOST) {
      data_slices[gpu]->labels[src_] = 0;
    }
    // NOTE(review): leftover debug output -- consider removing.
    printf("This is where the util:DEVICE=2 is checked with target %d\n",
           target);
    if (target & util::DEVICE) {
      GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
      GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
      GUARD_CU(data_slices[gpu]->labels.SetIdx());
      GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
    }
    return retval;
  }  // end of reset

  /** @} */

};  // end of problem

}  // namespace lp
}  // namespace app
}  // namespace gunrock

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
the_stack
#include "gpu/image/imgInsert.hpp"

#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"

#include "backend/common/imageOps.hpp"

#include "cuda/util.hpp"

#include <cuda_runtime.h>
#include <cassert>

// Edge of the square CUDA thread block (16x16 threads) used by all kernels below.
const unsigned int CudaBlockSize = 16;

namespace VideoStitch {
namespace Image {

/**
 * This kernel inserts the content of the (packed) image @src at offset (@offsetX, offsetY) into @dst.
 * On overflow, the image wraps if hWrap (resp vWrap) is true. Else nothing is written.
 * Pixels with zero alpha are not merged.
 * 2D version: We assume that the @src (but not the dst) image is divisible
 * by the block size on each dimension.
 *
 * The macro expands to a non-templated __global__ kernel named @funcName;
 * @testPredicate and @dstIndexComputation are spliced in verbatim and may use
 * the locals p, dstX, dstY, dstWidth, dstHeight.
 */
#define DEFINE_IMGIIK(funcName, testPredicate, dstIndexComputation)                                              \
  __global__ void funcName(uint32_t* __restrict__ dst, unsigned dstWidth, unsigned dstHeight,                    \
                           const uint32_t* __restrict__ src, unsigned srcWidth, unsigned srcHeight,              \
                           unsigned offsetX, unsigned offsetY) {                                                 \
    unsigned srcX = blockIdx.x * blockDim.x + threadIdx.x;                                                       \
    unsigned srcY = blockIdx.y * blockDim.y + threadIdx.y;                                                       \
                                                                                                                 \
    unsigned dstX = srcX + offsetX;                                                                              \
    unsigned dstY = srcY + offsetY;                                                                              \
                                                                                                                 \
    if (srcX < srcWidth && srcY < srcHeight) {                                                                   \
      uint32_t p = src[srcWidth * srcY + srcX];                                                                  \
      if (testPredicate) {                                                                                       \
        dst[dstIndexComputation] = p;                                                                            \
      }                                                                                                          \
    }                                                                                                            \
  }

/**
 * Same as above, except that @mask is used for blending.
 *
 * When the destination pixel already has non-zero alpha, src and dst are
 * linearly blended with weight mask/255 and the result is written with full
 * (0xff) alpha; when the destination alpha is zero, src overwrites dst only
 * where the mask is non-zero.
 */
#define DEFINE_IMGIIKM(funcName, testPredicate, dstIndexComputation)                                             \
  template <typename PixelType>                                                                                  \
  __global__ void funcName(uint32_t* __restrict__ dst, unsigned dstWidth, unsigned dstHeight,                    \
                           const uint32_t* __restrict__ src, unsigned srcWidth, unsigned srcHeight,              \
                           unsigned offsetX, unsigned offsetY, const unsigned char* __restrict__ mask) {         \
    unsigned srcX = blockIdx.x * blockDim.x + threadIdx.x;                                                       \
    unsigned srcY = blockIdx.y * blockDim.y + threadIdx.y;                                                       \
                                                                                                                 \
    unsigned dstX = srcX + offsetX;                                                                              \
    unsigned dstY = srcY + offsetY;                                                                              \
                                                                                                                 \
    if (srcX < srcWidth && srcY < srcHeight) {                                                                   \
      unsigned srcIndex = srcWidth * srcY + srcX;                                                                \
      uint32_t p = src[srcIndex];                                                                                \
      if (testPredicate) {                                                                                       \
        unsigned dstIndex = dstIndexComputation;                                                                 \
        uint32_t q = dst[dstIndex];                                                                              \
        if (PixelType::a(q)) {                                                                                   \
          int32_t m = mask[srcIndex];                                                                            \
          uint32_t mR = (m * PixelType::r(p) + (255 - m) * PixelType::r(q)) / 255;                               \
          uint32_t mG = (m * PixelType::g(p) + (255 - m) * PixelType::g(q)) / 255;                               \
          uint32_t mB = (m * PixelType::b(p) + (255 - m) * PixelType::b(q)) / 255;                               \
          q = PixelType::pack(mR, mG, mB, 0xff);                                                                 \
        } else {                                                                                                 \
          if (mask[srcIndex]) {                                                                                  \
            q = p;                                                                                               \
          }                                                                                                      \
        }                                                                                                        \
        dst[dstIndex] = q;                                                                                       \
      }                                                                                                          \
    }                                                                                                            \
  }

// Six kernel variants: {masked, unmasked} x {no wrap, horizontal wrap, vertical wrap}.
// NOTE(review): every predicate hardcodes RGB210::a(p) for the source-alpha test,
// even though the masked kernels are templated on PixelType and are also
// instantiated with Image::RGBA below — assumes RGB210::a() also detects zero
// alpha on RGBA-packed pixels; TODO confirm against the pixel packing in
// backend/common/imageOps.hpp.
DEFINE_IMGIIKM(imgInsertIntoKernelMaskedNoWrap, RGB210::a(p) && dstX < dstWidth && dstY < dstHeight,
               dstWidth* dstY + dstX)
DEFINE_IMGIIK(imgInsertIntoKernelNoWrap, RGB210::a(p) && dstX < dstWidth && dstY < dstHeight,
              dstWidth* dstY + dstX)
DEFINE_IMGIIKM(imgInsertIntoKernelMaskedHWrap, RGB210::a(p) && dstY < dstHeight,
               dstWidth* dstY + (dstX % dstWidth))
DEFINE_IMGIIK(imgInsertIntoKernelHWrap, RGB210::a(p) && dstY < dstHeight,
              dstWidth* dstY + (dstX % dstWidth))
DEFINE_IMGIIKM(imgInsertIntoKernelMaskedVWrap, RGB210::a(p) && dstX < dstWidth,
               dstWidth*(dstY % dstHeight) + dstX)
DEFINE_IMGIIK(imgInsertIntoKernelVWrap, RGB210::a(p) && dstX < dstWidth,
              dstWidth*(dstY % dstHeight) + dstX)

/**
 * Dispatches to the kernel variant matching (mask allocated?, hWrap?, vWrap?)
 * on the caller-supplied stream. The launch is asynchronous; CUDA_STATUS only
 * reports launch-time errors.
 * Simultaneous horizontal AND vertical wrap is not supported (assert(false)
 * and nothing is launched in release builds).
 * Grid is sized by ceil-div of the *source* dimensions by the 16x16 block.
 */
template <typename PixelType>
Status imgInsertInto(GPU::Buffer<uint32_t> dst, std::size_t dstWidth, std::size_t dstHeight,
                     GPU::Buffer<const uint32_t> src, std::size_t srcWidth, std::size_t srcHeight,
                     std::size_t offsetX, std::size_t offsetY, GPU::Buffer<const unsigned char> mask, bool hWrap,
                     bool vWrap, GPU::Stream gpuStream) {
  cudaStream_t stream = gpuStream.get();
  dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(srcWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(srcHeight, dimBlock.y), 1);
  if (mask.wasAllocated()) {
    if (hWrap) {
      if (vWrap) {
        assert(false);  // hWrap + vWrap combination is unsupported
      } else {
        imgInsertIntoKernelMaskedHWrap<PixelType><<<dimGrid, dimBlock, 0, stream>>>(
            dst.get(), (unsigned)dstWidth, (unsigned)dstHeight, src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
            (unsigned)offsetX, (unsigned)offsetY, mask.get());
      }
    } else {
      if (vWrap) {
        imgInsertIntoKernelMaskedVWrap<PixelType><<<dimGrid, dimBlock, 0, stream>>>(
            dst.get(), (unsigned)dstWidth, (unsigned)dstHeight, src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
            (unsigned)offsetX, (unsigned)offsetY, mask.get());
      } else {
        imgInsertIntoKernelMaskedNoWrap<PixelType><<<dimGrid, dimBlock, 0, stream>>>(
            dst.get(), (unsigned)dstWidth, (unsigned)dstHeight, src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
            (unsigned)offsetX, (unsigned)offsetY, mask.get());
      }
    }
  } else {
    if (hWrap) {
      if (vWrap) {
        assert(false);  // hWrap + vWrap combination is unsupported
      } else {
        imgInsertIntoKernelHWrap<<<dimGrid, dimBlock, 0, stream>>>(dst.get(), (unsigned)dstWidth, (unsigned)dstHeight,
                                                                   src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
                                                                   (unsigned)offsetX, (unsigned)offsetY);
      }
    } else {
      if (vWrap) {
        imgInsertIntoKernelVWrap<<<dimGrid, dimBlock, 0, stream>>>(dst.get(), (unsigned)dstWidth, (unsigned)dstHeight,
                                                                   src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
                                                                   (unsigned)offsetX, (unsigned)offsetY);
      } else {
        imgInsertIntoKernelNoWrap<<<dimGrid, dimBlock, 0, stream>>>(dst.get(), (unsigned)dstWidth, (unsigned)dstHeight,
                                                                    src.get(), (unsigned)srcWidth, (unsigned)srcHeight,
                                                                    (unsigned)offsetX, (unsigned)offsetY);
      }
    }
  }
  return CUDA_STATUS;
}

// Public entry point for RGBA-packed pixels.
Status imgInsertInto(GPU::Buffer<uint32_t> dst, std::size_t dstWidth, std::size_t dstHeight,
                     GPU::Buffer<const uint32_t> src, std::size_t srcWidth, std::size_t srcHeight,
                     std::size_t offsetX, std::size_t offsetY, GPU::Buffer<const unsigned char> mask, bool hWrap,
                     bool vWrap, GPU::Stream gpuStream) {
  return imgInsertInto<Image::RGBA>(dst, dstWidth, dstHeight, src, srcWidth, srcHeight, offsetX, offsetY, mask, hWrap,
                                    vWrap, gpuStream);
}

// Public entry point for 10-bit RGB210-packed pixels.
Status imgInsertInto10bit(GPU::Buffer<uint32_t> dst, std::size_t dstWidth, std::size_t dstHeight,
                          GPU::Buffer<const uint32_t> src, std::size_t srcWidth, std::size_t srcHeight,
                          std::size_t offsetX, std::size_t offsetY, GPU::Buffer<const unsigned char> mask, bool hWrap,
                          bool vWrap, GPU::Stream gpuStream) {
  return imgInsertInto<Image::RGB210>(dst, dstWidth, dstHeight, src, srcWidth, srcHeight, offsetX, offsetY, mask,
                                      hWrap, vWrap, gpuStream);
}
}  // namespace Image
}  // namespace VideoStitch
the_stack
// Batched complex matrix multiplication used for FFT-domain convolution:
// for every frequency position xy (innermost, contiguous dimension),
//   C[i][j][xy] (+)= op(A)[i][k][xy] * op(B)[k][j][xy]
// where op() optionally conjugate-transposes A and/or B (see the per-kernel
// comments below for which deep-learning pass each combination implements).
#include "cuda/Complex.cuh"
#include "cuda/ComputeCapabilities.cuh"
#include "cuda/CudaUtils.cuh"
#include "cuda/DeviceTensor.cuh"

#include <algorithm>
#include <cuda_runtime.h>
#include <glog/logging.h>

namespace facebook { namespace cuda {

namespace detail {

// constexpr max helpers usable in shared-memory array extents.
__device__ __forceinline__ constexpr int max(int i, int j) {
  return (i < j) ? j : i;
}

__device__ __forceinline__ constexpr int max(int i, int j, int k) {
  return facebook::cuda::detail::max(facebook::cuda::detail::max(i, j), k);
}

// Read-only-cache load of one Complex (as float2).
__device__ __forceinline__ Complex ldg(const Complex* p) {
  return Complex(__ldg((const float2*)p));
}

// Read-only-cache load of two adjacent Complex values via a single float4.
// Requires p to be 16-byte aligned (float4 load).
__device__ __forceinline__ void ldg(Complex& c1, Complex& c2, const Complex* p) {
  const float4 f = __ldg((const float4*)p);
  c1 = Complex(f.x, f.y);
  c2 = Complex(f.z, f.w);
}

// Tiled kernel for small FFT sizes. Each block computes a
// (TileI * TileIThreadIdxY) x (TileJ * TileJThreadIdxZ) tile of C for
// FFTElements consecutive xy positions, staging A/B fragments through shared
// memory with float4 loads (half the x-lanes load A, the other half load B).
// blockDim must be (FFTElements, TileIThreadIdxY, TileJThreadIdxZ) and the
// reduction length a multiple of TileK — enforced by the asserts below.
// NOTE(review): the shared `swap` buffer is written and read across x-lanes
// with no __syncthreads()/__syncwarp() in between — this relies on implicit
// warp-synchronous execution; verify on architectures with independent
// thread scheduling (Volta+).
template <bool ConjugateTransposeA,
          bool ConjugateTransposeB,
          int FFTSize,
          int FFTElements,
          int TileI,
          int TileJ,
          int TileK,
          int TileIThreadIdxY,
          int TileJThreadIdxZ,
          bool Accumulate>
__launch_bounds__(32 * 4 * 2, 2) // 128 registers on K40
__global__ void transposeMMTiledKernelSmall(const DeviceTensor<Complex, 3> A,
                                            const DeviceTensor<Complex, 3> B,
                                            DeviceTensor<Complex, 3> C,
                                            Complex invNorm) {
  const auto xyBase = blockIdx.z * blockDim.x;
  const auto xy = blockIdx.z * blockDim.x + threadIdx.x;
  // Reduction length depends on which operand is (conjugate-)transposed.
  const int numRed = (ConjugateTransposeA) ? A.getSize(0) : A.getSize(1);
  // Conditions must hold for float4 implementation to be valid
  assert(xy < FFTSize * (FFTSize / 2 + 1));
  assert(FFTElements == blockDim.x);
  assert(TileIThreadIdxY == blockDim.y);
  assert(TileJThreadIdxZ == blockDim.z);
  assert(numRed % TileK == 0);

  Complex c[TileI][TileJ];
  // for (int i = TileI * blockIdx.x; i < C.getSize(0); i += TileI * gridDim.x) {
  //   for (int j = TileJ * blockIdx.y; j < C.getSize(1); j += TileJ * gridDim.y) {
  {
    {
      // blockIdx.x/y are the ceils
      int i = TileI * (threadIdx.y + blockDim.y * blockIdx.x);
      int j = TileJ * (threadIdx.z + blockDim.z * blockIdx.y);
      // Guard against overflows
      assert(i + TileI <= C.getSize(0));
      assert(j + TileJ <= C.getSize(1));
      // Initialize the accumulator tile (optionally from C for += semantics).
      for (int ii = 0; ii < TileI; ++ii) {
        for (int jj = 0; jj < TileJ; ++jj) {
          c[ii][jj] = (Accumulate) ? C[i + ii][j + jj][xy] : Complex(0.0f);
        }
      }
      for (int k = 0; k < numRed; k += TileK) {
        Complex a[TileK][TileI];
        Complex b[TileK][TileJ];
        __shared__ Complex swap
          [TileJThreadIdxZ]
          [TileIThreadIdxY]
          [facebook::cuda::detail::max(TileI, TileJ, TileK)]
          [2]
          [FFTElements];
        // View float2[2][FFTElements] as float4[FFTElements], let the
        // compiler worry about the indexing.
        auto swapViewFloat4 = (float4(*)
          [TileIThreadIdxY]
          [facebook::cuda::detail::max(TileI, TileJ, TileK)]
          [FFTElements])swap;
        // Illustration with blockDim.x == 8
        // Goal
        // th 0  1  2  3  4  5  6  7
        // a  A0 A1 A2 A3 A4 A5 A6 A7
        // b  B0 B1 B2 B3 B4 B5 B6 B7
        //
        // Threads <  blockDim.x / 2 load A0 - A7 into shared float4
        // Threads >= blockDim.x / 2 load B0 - B7 into shared float4
        // Actual
        // s  A0/A1 A2/A3 A4/A5 A6/A7 | B0/B1 B2/B3 B4/B5 B6/B7
        const auto xdim = (threadIdx.x < blockDim.x / 2) ?
          xyBase + 2 * threadIdx.x :
          xyBase + 2 * (threadIdx.x - blockDim.x / 2);
        for (int kk = 0; kk < TileK; ++kk) {
          // This statically unrolls for max(TileI, TileJ, TileK) and computes
          // a base pointer for Threads < blockDim.x / 2 and
          // Threads >= blockDim.x / 2
          // If there is imbalance, the pointer computed is nullptr
          // and the load is not generated.
          for (int ij = 0; ij < facebook::cuda::detail::max(TileI, TileJ, TileK); ++ij) {
            const Complex* baseA = (ij >= TileI) ? nullptr :
              ((!ConjugateTransposeA) ?
               A[i + ij][k + kk][xdim].data() :
               A[k + kk][i + ij][xdim].data());
            const Complex* baseB = (ij >= TileJ) ? nullptr :
              ((!ConjugateTransposeB) ?
               B[k + kk][j + ij][xdim].data() :
               B[j + ij][k + kk][xdim].data());
            const Complex* base = (threadIdx.x < blockDim.x / 2) ? baseA : baseB;
            if (base) {
              swapViewFloat4[threadIdx.z][threadIdx.y][ij][threadIdx.x] =
                __ldg((const float4*)(base));
            }
          }
          // Pull this thread's A/B fragments out of the shared staging buffer.
          for (int ii = 0; ii < TileI; ++ii) {
            a[kk][ii] = swap[threadIdx.z][threadIdx.y][ii][0][threadIdx.x];
          }
          for (int jj = 0; jj < TileJ; ++jj) {
            b[kk][jj] = swap[threadIdx.z][threadIdx.y][jj][1][threadIdx.x];
          }
        }
        // Conjugation was delayed until after the loads to shorten the
        // load-use dependence chains.
        if (ConjugateTransposeA) {
          for (int kk = 0; kk < TileK; ++kk) {
            for (int ii = 0; ii < TileI; ++ii) {
              a[kk][ii] = a[kk][ii].conjugate();
            }
          }
        }
        if (ConjugateTransposeB) {
          for (int kk = 0; kk < TileK; ++kk) {
            for (int jj = 0; jj < TileJ; ++jj) {
              b[kk][jj] = b[kk][jj].conjugate();
            }
          }
        }
        // Rank-TileK update of the register tile.
        for (int kk = 0; kk < TileK; ++kk) {
          for (int jj = 0; jj < TileJ; ++jj) {
            for (int ii = 0; ii < TileI; ++ii) {
              c[ii][jj] += a[kk][ii] * b[kk][jj];
            }
          }
        }
      }
      // Actual
      // c C0 C2 C4 C6 C1 C3 C5 C7
      // Scale by the inverse FFT normalization and write back.
      for (int ii = 0; ii < TileI; ++ii) {
        for (int jj = 0; jj < TileJ; ++jj) {
          c[ii][jj].re() *= invNorm.re();
          c[ii][jj].im() *= invNorm.re();
          *(C[i + ii][j + jj][xy].dataAs<float2>()) = (float2)(c[ii][jj]);
        }
      }
    }
  }
}

// By construction, x * y is contiguous.
// doall xy
//   doall i, j
//     red k
//       C[i][j][x * y] += A[i][k][x * y] * B[k][j][x * y]
//
// UpdateOutput     : xy times o(b, f) <- i(b, p) . conj(f(f, p))
// AccGradParameters: xy times f(f, p) <- conj(o(b, f)) . i(b, p)
// UpdateGradInput  : xy times i(b, p) <- o(b, f) . f(f, p)
//
// Fully-unrolled variant: all loop bounds divide evenly (asserted), so the
// outer i/j/xy loops collapse to single iterations (the commented-out loops
// show the general form). Shared array `as` is indexed by threadIdx.x into
// [FFTSize] slots, so blockDim.x must equal FFTSize — the host-side launch
// below uses dim3 threads(FFTSize, C_J_Unroll).
template <bool ConjugateTransposeA,
          bool ConjugateTransposeB,
          int FFTSize,
          int C_J_Unroll,
          int C_I_Tile,
          int C_J_Tile,
          int ReductionUnroll,
          bool Accumulate>
__launch_bounds__(32 * 32, 1) // 64 registers on K40
__global__ void transposeMMTiledKernelUnrolled(const DeviceTensor<Complex, 3> A,
                                               const DeviceTensor<Complex, 3> B,
                                               DeviceTensor<Complex, 3> C,
                                               Complex invNorm) {
  assert(C_J_Unroll == blockDim.y);
  assert(A.getSize(2) == C.getSize(2));
  assert(B.getSize(2) == C.getSize(2));
  assert(ConjugateTransposeA || A.getSize(0) == C.getSize(0));
  assert(!ConjugateTransposeA || A.getSize(1) == C.getSize(0));
  assert(ConjugateTransposeB || B.getSize(1) == C.getSize(1));
  assert(!ConjugateTransposeB || B.getSize(0) == C.getSize(1));
  assert(ConjugateTransposeA || ConjugateTransposeB || A.getSize(1) == B.getSize(0));
  const int numRed = (ConjugateTransposeA) ? A.getSize(0) : A.getSize(1);
  // const int ubi = C.getSize(0);
  // assert(C.getSize(0) % (C_I_Tile * gridDim.x) == 0);
  // const int ubj = C.getSize(1);
  // assert(C.getSize(1) % (C_J_Tile * gridDim.y * blockDim.y) == 0);
  const int ubk = numRed;
  assert(numRed % ReductionUnroll == 0);
  const int numBatches = A.getSize(2);
  // ubxy is only needed by the general (commented-out) xy loop below.
  const int ubxy =
    ceil(numBatches, (int)(gridDim.z * blockDim.x)) * gridDim.z * blockDim.x;
  // for (int i = C_I_Tile * blockIdx.x; i < ubi; i += C_I_Tile * gridDim.x) {
  //   for (int j = C_J_Tile * (blockIdx.y * blockDim.y + threadIdx.y);
  //        j < ubj;
  //        j += C_J_Tile * gridDim.y * blockDim.y) {
  {
    {
      int i = C_I_Tile * blockIdx.x;
      int j = C_J_Tile * (blockIdx.y * blockDim.y + threadIdx.y);
      // for (int xy = blockDim.x * blockIdx.z + threadIdx.x;
      //      xy < ubxy;
      //      xy += gridDim.z * blockDim.x) {
      {
        int xy = blockDim.x * blockIdx.z + threadIdx.x;
        Complex a[C_I_Tile];
        Complex b[C_J_Tile][ReductionUnroll];
        Complex c[C_I_Tile][C_J_Tile];
        for (int k = 0; k < ubk; k += ReductionUnroll) {
          // Load B from device to registers with boundary check and static
          // optimization of those checks.
          for (int jj = 0; jj < C_J_Tile; ++jj) {
            for (int kk = 0; kk < ReductionUnroll; ++kk) {
              b[jj][kk] = (ConjugateTransposeB) ?
                ldg(&B[j + jj][k + kk][xy]) : // delay conjugate reduces dep
                ldg(&B[k + kk][j + jj][xy]);
            }
          }
          // Use init to hide some latencies
          if (k == 0) {
            for (int ii = 0; ii < C_I_Tile; ++ii) {
              for (int jj = 0; jj < C_J_Tile; ++jj) {
                c[ii][jj] = (Accumulate) ? C[i + ii][j + jj][xy] : Complex(0.0f);
              }
            }
          }
          // Load A from device to shared with boundary check and static
          // optimization of those checks.
          // Distribute loads across blockIdx.y
          __shared__ Complex as[C_I_Tile][ReductionUnroll][FFTSize];
          assert(C_I_Tile <= blockDim.y);
          // Kill WAW dependence
          __syncthreads();
          if (threadIdx.y < C_I_Tile) {
            int ii = threadIdx.y;
            for (int kk = 0; kk < ReductionUnroll; ++kk) {
              as[ii][kk][threadIdx.x] = (ConjugateTransposeA) ?
                ldg(&A[k + kk][i + ii][xy]) : // delay conjugate reduces dep
                ldg(&A[i + ii][k + kk][xy]);
            }
          }
          // Kill RAW dependence
          __syncthreads();
          // Perform partial accumulation; conjugation is applied here,
          // after the loads (see "delay conjugate" above).
          for (int kk = 0; kk < ReductionUnroll; ++kk) {
            for (int ii = 0; ii < C_I_Tile; ++ii) {
              a[ii] = as[ii][kk][threadIdx.x];
            }
            for (int jj = 0; jj < C_J_Tile; ++jj) {
              for (int ii = 0; ii < C_I_Tile; ++ii) {
                if (ConjugateTransposeA) {
                  c[ii][jj] = a[ii].conjugate() * b[jj][kk] + c[ii][jj];
                } else if (ConjugateTransposeB) {
                  c[ii][jj] = a[ii] * b[jj][kk].conjugate() + c[ii][jj];
                } else {
                  c[ii][jj] = a[ii] * b[jj][kk] + c[ii][jj];
                }
              }
            }
          }
        }
        // Scale by the inverse FFT normalization and write back.
        for (int ii = 0; ii < C_I_Tile; ++ii) {
          for (int jj = 0; jj < C_J_Tile; ++jj) {
            c[ii][jj].re() *= invNorm.re();
            c[ii][jj].im() *= invNorm.re();
            *(C[i + ii][j + jj][xy].dataAs<float2>()) = (float2)(c[ii][jj]);
          }
        }
      }
    }
  }
}

// By construction, x * y is contiguous.
// doall xy
//   doall i, j
//     red k
//       C[i][j][x * y] += A[i][k][x * y] * B[k][j][x * y]
//
// UpdateOutput     : xy times o(b, f) <- i(b, p) . conj(f(f, p))
// AccGradParameters: xy times f(f, p) <- conj(o(b, f)) . i(b, p)
// UpdateGradInput  : xy times i(b, p) <- o(b, f) . f(f, p)
//
// General fallback variant: handles arbitrary sizes. Each StaticUnroll* flag
// promises the corresponding dimension divides evenly so the bounds checks
// can be compiled away; when false, out-of-range loads are replaced by zeros
// and out-of-range stores are skipped.
template <bool ConjugateTransposeA,
          bool ConjugateTransposeB,
          int C_XY_Placement_ThreadIdx_X,
          int C_J_Unroll,
          int C_I_Tile,
          int C_J_Tile,
          int ReductionUnroll,
          bool StaticUnrollA,
          bool StaticUnrollB,
          bool StaticUnrollCI,
          bool StaticUnrollCJ,
          bool StaticUnrollXY,
          bool StaticUnrollReduction,
          bool Accumulate>
__launch_bounds__(32 * 32, 1)
__global__ void transposeMMTiledKernel(const DeviceTensor<Complex, 3> A,
                                       const DeviceTensor<Complex, 3> B,
                                       DeviceTensor<Complex, 3> C,
                                       Complex invNorm) {
  assert(C_J_Unroll == blockDim.y);
  assert(C_XY_Placement_ThreadIdx_X == blockDim.x);
  assert(A.getSize(2) == C.getSize(2));
  assert(B.getSize(2) == C.getSize(2));
  assert(ConjugateTransposeA || A.getSize(0) == C.getSize(0));
  assert(!ConjugateTransposeA || A.getSize(1) == C.getSize(0));
  assert(ConjugateTransposeB || B.getSize(1) == C.getSize(1));
  assert(!ConjugateTransposeB || B.getSize(0) == C.getSize(1));
  assert(ConjugateTransposeA || ConjugateTransposeB || A.getSize(1) == B.getSize(0));
  const int numRed = (ConjugateTransposeA) ? A.getSize(0) : A.getSize(1);
  // Round each upper bound up to a multiple of the tile stride unless the
  // corresponding StaticUnroll* flag guarantees exact divisibility.
  const int ubi = (StaticUnrollCI) ?
    C.getSize(0) :
    ceil(C.getSize(0), (int)(C_I_Tile * gridDim.x)) * C_I_Tile * gridDim.x;
  assert(!StaticUnrollCI || C.getSize(0) % (C_I_Tile * gridDim.x) == 0);
  const int ubj = (StaticUnrollCJ) ?
    C.getSize(1) :
    ceil(C.getSize(1), (int)(C_J_Tile * gridDim.y * blockDim.y)) *
      C_J_Tile * gridDim.y * blockDim.y;
  assert(!StaticUnrollCJ || C.getSize(1) % (C_J_Tile * gridDim.y * blockDim.y) == 0);
  const int ubk = (StaticUnrollReduction) ?
    numRed : ceil(numRed, ReductionUnroll) * ReductionUnroll;
  assert(!StaticUnrollReduction || numRed % ReductionUnroll == 0);
  const int numBatches = A.getSize(2);
  const int ubxy =
    ceil(numBatches, (int)(gridDim.z * blockDim.x)) * gridDim.z * blockDim.x;
  for (int i = C_I_Tile * blockIdx.x; i < ubi; i += C_I_Tile * gridDim.x) {
    for (int j = C_J_Tile * (blockIdx.y * blockDim.y + threadIdx.y);
         j < ubj;
         j += C_J_Tile * gridDim.y * blockDim.y) {
      for (int xy = blockDim.x * blockIdx.z + threadIdx.x;
           xy < ubxy;
           xy += gridDim.z * blockDim.x) {
        Complex a[C_I_Tile];
        Complex b[C_J_Tile][ReductionUnroll];
        Complex c[C_I_Tile][C_J_Tile];
        for (int k = 0; k < ubk; k += ReductionUnroll) {
          // Kill WAW dependence
          __syncthreads();
          // Load B from device to registers with boundary check and static
          // optimization of those checks.
          for (int jj = 0; jj < C_J_Tile; ++jj) {
            if ((StaticUnrollXY || xy < numBatches) &&
                (StaticUnrollB ||
                 (ConjugateTransposeB && j + jj < B.getSize(0)) ||
                 (!ConjugateTransposeB && j + jj < B.getSize(1)))) {
              for (int kk = 0; kk < ReductionUnroll; ++kk) {
                b[jj][kk] = (StaticUnrollReduction || k + kk < numRed) ?
                  ((ConjugateTransposeB) ?
                   ldg(&B[j + jj][k + kk][xy]).conjugate() :
                   ldg(&B[k + kk][j + jj][xy])) :
                  Complex(0.0f);
              }
            } else {
              // Out of range: contribute zeros so the MAC loop stays branch-free.
              for (int kk = 0; kk < ReductionUnroll; ++kk) {
                b[jj][kk] = Complex(0.0f);
              }
            }
          }
          // Load A from device to shared with boundary check and static
          // optimization of those checks.
          // Distribute loads across blockIdx.y
          // (+1 padding on the innermost extent avoids shared-memory bank
          // conflicts on the threadIdx.x-indexed reads.)
          __shared__ Complex as[C_I_Tile][ReductionUnroll][C_XY_Placement_ThreadIdx_X + 1];
          assert(C_I_Tile <= blockDim.y);
          if (threadIdx.y < C_I_Tile) {
            int ii = threadIdx.y;
            if ((StaticUnrollXY || xy < numBatches) &&
                (StaticUnrollA ||
                 (ConjugateTransposeA && i + ii < A.getSize(1)) ||
                 (!ConjugateTransposeA && i + ii < A.getSize(0)))) {
              for (int kk = 0; kk < ReductionUnroll; ++kk) {
                as[ii][kk][threadIdx.x] = (StaticUnrollReduction || k + kk < numRed) ?
                  ((ConjugateTransposeA) ?
                   ldg(&A[k + kk][i + ii][xy]).conjugate() :
                   ldg(&A[i + ii][k + kk][xy])) :
                  Complex(0.0f);
              }
            } else {
              for (int kk = 0; kk < ReductionUnroll; ++kk) {
                as[ii][kk][threadIdx.x] = Complex(0.0f);
              }
            }
          }
          // Use init to hide some latencies
          if (k == 0) {
            for (int ii = 0; ii < C_I_Tile; ++ii) {
              for (int jj = 0; jj < C_J_Tile; ++jj) {
                c[ii][jj] = (Accumulate &&
                             (StaticUnrollCI || i + ii < C.getSize(0)) &&
                             (StaticUnrollCJ || j + jj < C.getSize(1)) &&
                             (StaticUnrollXY || xy < numBatches)) ?
                  C[i + ii][j + jj][xy] : Complex(0.0f);
              }
            }
          }
          // Kill RAW dependence
          __syncthreads();
          // Perform partial accumulation (conjugation already applied at load).
          for (int kk = 0; kk < ReductionUnroll; ++kk) {
            for (int ii = 0; ii < C_I_Tile; ++ii) {
              a[ii] = as[ii][kk][threadIdx.x];
            }
            for (int jj = 0; jj < C_J_Tile; ++jj) {
              for (int ii = 0; ii < C_I_Tile; ++ii) {
                c[ii][jj] = a[ii] * b[jj][kk] + c[ii][jj];
              }
            }
          }
        }
        // Write back only in-range elements, scaled by the normalization.
        if (StaticUnrollXY || xy < numBatches) {
          for (int ii = 0;
               ii < C_I_Tile && (StaticUnrollCI || i + ii < C.getSize(0));
               ++ii) {
            for (int jj = 0;
                 jj < C_J_Tile && (StaticUnrollCJ || j + jj < C.getSize(1));
                 ++jj) {
              c[ii][jj].re() *= invNorm.re();
              c[ii][jj].im() *= invNorm.re();
              *(C[i + ii][j + jj][xy].dataAs<float2>()) = (float2)(c[ii][jj]);
            }
          }
        }
      }
    }
  }
}

// Functor that halves an int in place; used to convert float strides into
// Complex (float2) strides.
struct HalfFtor {
  HalfFtor() {}
  void operator()(int& n) { n >>= 1; }
};

} // ns detail

// Host-side dispatcher. Reinterprets the float tensors A/B/C as Complex
// tensors (dropping the innermost real/imag dimension, halving strides),
// then tries progressively less specialized kernel instantiations for the
// recognized frequency-plane sizes, finally falling back to the fully
// general (and slow) transposeMMTiledKernel. The launch is asynchronous on
// stream `s`.
template <int Dim, bool ConjugateTransposeA, bool ConjugateTransposeB, bool Accumulate>
void transposeMM(DeviceTensor<float, Dim>& A,
                 DeviceTensor<float, Dim>& B,
                 DeviceTensor<float, Dim>& C,
                 float invNorm,
                 cudaStream_t s) {
  int szA[Dim - 1];
  int stA[Dim - 1];
  std::copy(A.sizes(), A.sizes() + Dim - 1, szA);
  std::copy(A.strides(), A.strides() + Dim - 1, stA);
  std::for_each(&stA[0], &stA[Dim - 1], detail::HalfFtor());
  int szB[Dim - 1];
  int stB[Dim - 1];
  std::copy(B.sizes(), B.sizes() + Dim - 1, szB);
  std::copy(B.strides(), B.strides() + Dim - 1, stB);
  std::for_each(&stB[0], &stB[Dim - 1], detail::HalfFtor());
  int szC[Dim - 1];
  int stC[Dim - 1];
  std::copy(C.sizes(), C.sizes() + Dim - 1, szC);
  std::copy(C.strides(), C.strides() + Dim - 1, stC);
  std::for_each(&stC[0], &stC[Dim - 1], detail::HalfFtor());

  DeviceTensor<Complex, Dim - 1> cA(A.template dataAs<Complex>(), szA, stA);
  DeviceTensor<Complex, Dim - 1> cB(B.template dataAs<Complex>(), szB, stB);
  DeviceTensor<Complex, Dim - 1> cC(C.template dataAs<Complex>(), szC, stC);
  DeviceTensor<Complex, 3> dcA = cA.template downcastInner<3>();
  DeviceTensor<Complex, 3> dcB = cB.template downcastInner<3>();
  DeviceTensor<Complex, 3> dcC = cC.template downcastInner<3>();

// Expands to a guarded launch of transposeMMTiledKernelUnrolled: if every
// divisibility precondition holds for the given tiling, launch and return
// from transposeMM; otherwise fall through to the next candidate.
// NOTE(review): the first if(debug) body emits the same LOG(INFO) line twice —
// looks like a copy-paste duplicate.
#define INSTANTIATE_FBMM_FULLY_UNROLLED(                                      \
    C_J_Unroll,                                                               \
    C_I_Tile,                                                                 \
    C_J_Tile,                                                                 \
    ReductionUnroll)                                                          \
  {                                                                           \
    bool StaticUnrollA =                                                      \
      ((ConjugateTransposeA && (dcA.getSize(1) % C_I_Tile == 0)) ||           \
       (!ConjugateTransposeA && (dcA.getSize(0) % C_I_Tile == 0)));           \
    bool StaticUnrollB =                                                      \
      ((ConjugateTransposeB && (dcB.getSize(0) % C_J_Tile == 0)) ||           \
       (!ConjugateTransposeB && (dcB.getSize(1) % C_J_Tile == 0)));           \
    bool StaticUnrollCI = (dcC.getSize(0) % C_I_Tile == 0);                   \
    bool StaticUnrollCJ = (dcC.getSize(1) % (C_J_Unroll * C_J_Tile) == 0);    \
    const int numRed =                                                        \
      (ConjugateTransposeA) ? dcA.getSize(0) : dcA.getSize(1);                \
    bool StaticUnrollReduction = (numRed % ReductionUnroll == 0);             \
    if (debug) {                                                              \
      LOG(INFO) << StaticUnrollA << " " << StaticUnrollB << " "               \
                << StaticUnrollCI << " " << StaticUnrollCJ << " "             \
                << StaticUnrollReduction;                                     \
      LOG(INFO) << StaticUnrollA << " " << StaticUnrollB << " "               \
                << StaticUnrollCI << " " << StaticUnrollCJ << " "             \
                << StaticUnrollReduction;                                     \
    }                                                                         \
    if (StaticUnrollA && StaticUnrollB && StaticUnrollCI &&                   \
        StaticUnrollCJ && StaticUnrollReduction) {                            \
      if (debug) {                                                            \
        LOG(INFO) << "Params: " << C_J_Unroll << " " <<                       \
          C_I_Tile << " " <<                                                  \
          C_J_Tile << " " <<                                                  \
          ReductionUnroll;                                                    \
      }                                                                       \
      /* Needed for proper loading of data */                                 \
      CHECK_LE(C_I_Tile, C_J_Unroll);                                         \
      dim3 blocks(ceil(dcC.getSize(0), C_I_Tile),                             \
                  ceil(dcC.getSize(1), C_J_Unroll * C_J_Tile),                \
                  FFTSize / 2 + 1);                                           \
      dim3 threads(FFTSize, C_J_Unroll);                                      \
      detail::transposeMMTiledKernelUnrolled<ConjugateTransposeA,             \
                                             ConjugateTransposeB,             \
                                             FFTSize,                         \
                                             C_J_Unroll,                      \
                                             C_I_Tile,                        \
                                             C_J_Tile,                        \
                                             ReductionUnroll,                 \
                                             Accumulate>                      \
        <<<blocks, threads, 0, s>>>(dcA, dcB, dcC, Complex(invNorm));         \
      return;                                                                 \
    }                                                                         \
  }

// Same idea for transposeMMTiledKernelSmall with one concrete tiling.
#define INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                           \
    TILEI, TILEJ, TILEK, TILEITHY, TILEJTHZ, FFTELEMENTS)                     \
  {                                                                           \
    constexpr int TileI = TILEI;                                              \
    constexpr int TileJ = TILEJ;                                              \
    constexpr int TileK = TILEK;                                              \
    constexpr int TileIThreadIdxY = TILEITHY;                                 \
    constexpr int TileJThreadIdxZ = TILEJTHZ;                                 \
    constexpr int FFTElements = FFTELEMENTS;                                  \
    if (dcC.getSize(0) % (TileI * TileIThreadIdxY) == 0 &&                    \
        dcC.getSize(1) % (TileJ * TileJThreadIdxZ) == 0 &&                    \
        (                                                                     \
          (!ConjugateTransposeA && ((dcA.getSize(1) % TileK) == 0)) ||        \
          ( ConjugateTransposeA && ((dcA.getSize(0) % TileK) == 0))           \
        ) &&                                                                  \
        (FFTSize * (FFTSize / 2 + 1)) % (2 * FFTElements) == 0) {             \
      if (debug) {                                                            \
        LOG(INFO) << " TileI = " << TileI                                     \
                  << " TileJ = " << TileJ                                     \
                  << " TileK = " << TileK                                     \
                  << " TileIThreadIdxY = " << TileIThreadIdxY                 \
                  << " TileJThreadIdxZ = " << TileJThreadIdxZ                 \
                  << " FFTElements = " << FFTElements;                        \
      }                                                                       \
      static_assert(FFTSize % FFTElements == 0,                               \
                    "float4 reads requires FFTSize % FFTElements == 0");      \
      dim3 blocks(ceil(dcC.getSize(0), TileI * TileIThreadIdxY),              \
                  ceil(dcC.getSize(1), TileJ * TileJThreadIdxZ),              \
                  ceil(FFTSize * (FFTSize / 2 + 1), FFTElements));            \
      dim3 threads(FFTElements, TileIThreadIdxY, TileJThreadIdxZ);            \
      detail::transposeMMTiledKernelSmall<ConjugateTransposeA,                \
                                          ConjugateTransposeB,                \
                                          FFTSize,                            \
                                          FFTElements,                        \
                                          TileI,                              \
                                          TileJ,                              \
                                          TileK,                              \
                                          TileIThreadIdxY,                    \
                                          TileJThreadIdxZ,                    \
                                          Accumulate>                         \
        <<<blocks, threads, 0, s>>> (dcA, dcB, dcC, Complex(invNorm));        \
      return;                                                                 \
    }                                                                         \
  }

// Always look permutations of (TILEI, TILEJ, TILEK) and (TILEITHY, TILEJTHZ)
#define INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(                                \
    TILEI, TILEJ, TILEK, TILEITHY, TILEJTHZ, FFTELEMENTS)                     \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEI, TILEJ, TILEK, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEI, TILEJ, TILEK, TILEJTHZ, TILEITHY, FFTELEMENTS);                    \
                                                                              \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEI, TILEK, TILEJ, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEI, TILEK, TILEJ, TILEJTHZ, TILEITHY, FFTELEMENTS);                    \
                                                                              \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEJ, TILEI, TILEK, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEJ, TILEI, TILEK, TILEJTHZ, TILEITHY, FFTELEMENTS);                    \
                                                                              \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEJ, TILEK, TILEI, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEJ, TILEK, TILEI, TILEJTHZ, TILEITHY, FFTELEMENTS);                    \
                                                                              \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEK, TILEI, TILEJ, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEK, TILEI, TILEJ, TILEJTHZ, TILEITHY, FFTELEMENTS);                    \
                                                                              \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEK, TILEJ, TILEI, TILEITHY, TILEJTHZ, FFTELEMENTS);                    \
  INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED_IMPL(                                 \
    TILEK, TILEJ, TILEI, TILEJTHZ, TILEITHY, FFTELEMENTS);

  bool debug = false;
  if (debug) {
    LOG(INFO) << "ConjugateTransposeA: " << ConjugateTransposeA
              << " ConjugateTransposeB: " << ConjugateTransposeB
              << "\nA: " << A << " -> " << cA << " -> " << dcA
              << "\nB: " << B << " -> " << cB << " -> " << dcB
              << "\nC: " << C << " -> " << cC << " -> " << dcC;
  }

  // INSTANTIATE_FBMM_FULLY_UNROLLED(C_J_Unroll,
  //                                 C_I_Tile,
  //                                 C_J_Tile,
  //                                 ReductionUnroll)
  // TODO: Add more instantiations to cover use cases properly
  // Dispatch keyed on the frequency-plane size FFTSize * (FFTSize / 2 + 1).
  // The 3*4, 33*64 and 65*128 branches are intentionally empty (no
  // specialization yet) and fall through to the generic kernel below.
  if (dcA.getSize(2) == 3 * 4) {
  } else if (dcA.getSize(2) == 5 * 8) {
    constexpr int FFTSize = 8;
    INSTANTIATE_FBMM_FULLY_UNROLLED(32, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED(16, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED(12, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED( 8, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 4, 4, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 4, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 4, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 2, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 2, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 1, 1, 4);
    // InputPlane = 3*k (RGB input mostly)
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 4, 4, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 4, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 4, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 2, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 2, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 1, 1, 4);
    // Batch size = 1
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 4, 4, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 4, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 4, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 2, 2, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 2, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 1, 1, 4);
    // Fallback
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(2, 2, 2, 1, 1, 4);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 1, 1, 1, 1, 4);
  } else if (dcA.getSize(2) == 9 * 16) {
    constexpr int FFTSize = 16;
    INSTANTIATE_FBMM_FULLY_UNROLLED(16, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED(12, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED( 8, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 4, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 4, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 2, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 2, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 1, 1, 8);
    // InputPlane = 3*k (RGB input mostly)
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 4, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 4, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 2, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 2, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 1, 1, 8);
    // Batch size = 1
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 4, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 4, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 2, 2, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 2, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 1, 1, 8);
    // Fallback
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(2, 2, 2, 1, 1, 8);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 1, 1, 1, 1, 8);
  } else if (dcA.getSize(2) == 17 * 32) {
    constexpr int FFTSize = 32;
    INSTANTIATE_FBMM_FULLY_UNROLLED(8, /* */ 8, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED(6, /* */ 4, 2, 2);
    INSTANTIATE_FBMM_FULLY_UNROLLED(4, /* */ 4, 2, 2);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 2, 2, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(4, 4, 4, 1, 1, 16);
    // InputPlane = 3*k (RGB input mostly)
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 1, 4, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 1, 2, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(3, 4, 4, 1, 1, 16);
    // Batch size = 1
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 1, 4, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 1, 2, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 4, 4, 1, 1, 16);
    // Fallback
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(2, 2, 2, 1, 1, 16);
    INSTANTIATE_FBMM_SMALL_FULLY_UNROLLED(1, 1, 1, 1, 1, 16);
  } else if (dcA.getSize(2) == 33 * 64) {
  } else if (dcA.getSize(2) == 65 * 128) {
  }

  // Fallback cases
  if (debug) {
    LOG(WARNING) << "Unspecialized case, performance will be very bad";
  }

  // Default case, performance will most likely be bad if we get here.
  // NOTE(review): these defines are never #undef'd; anything included after
  // this point would see them.
#define C_I_Tile 4
#define C_J_Tile 2
#define ReductionUnroll 1
#define C_J_Unroll 4
#define C_XY_Placement_ThreadIdx_X 4
#define C_XY_Placement_BlockIdx_Z 1
  dim3 blocks(ceil(dcC.getSize(0), C_I_Tile),
              ceil(dcC.getSize(1), C_J_Unroll * C_J_Tile),
              C_XY_Placement_BlockIdx_Z);
  dim3 threads(C_XY_Placement_ThreadIdx_X, C_J_Unroll);
  detail::transposeMMTiledKernel<ConjugateTransposeA,
                                 ConjugateTransposeB,
                                 C_XY_Placement_ThreadIdx_X,
                                 C_J_Unroll,
                                 C_I_Tile,
                                 C_J_Tile,
                                 ReductionUnroll,
                                 false, false, false, false, false, false,
                                 Accumulate>
    <<<blocks, threads, 0, s>>>(dcA, dcB, dcC, Complex(invNorm));
}

}} // ns
the_stack
#include "generalized_projection.h" // clang-format on #include <ATen/Functions.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include "common.cuh" #include "utils.cuh" #include "vec_utils.cuh" namespace geomlib { namespace { const float kEps = 1e-8; template <int dim, typename scalar_t> __device__ inline void ComputeBarycentricGradient( const scalar_t* e1, const scalar_t* e2, const scalar_t e1_dot_e2, const scalar_t e1_norm_sqr, const scalar_t e2_norm_sqr, scalar_t* grad_w1, scalar_t* grad_w2) { { zero_out_vec<dim>(grad_w1); add_vec<dim>(grad_w1, e1); scalar_t tmp[dim]; scalar_times_vec<dim>(-e1_dot_e2 / (kEps + e2_norm_sqr), e2, tmp); add_vec<dim>(grad_w1, tmp); scalar_t denom = e1_norm_sqr - e1_dot_e2 * e1_dot_e2 / (kEps + e2_norm_sqr); scalar_times_vec<dim>(1 / (denom + kEps), grad_w1, grad_w1); } { zero_out_vec<dim>(grad_w2); add_vec<dim>(grad_w2, e2); scalar_t tmp[dim]; scalar_times_vec<dim>(-e1_dot_e2 / (kEps + e1_norm_sqr), e1, tmp); add_vec<dim>(grad_w2, tmp); scalar_t denom = e2_norm_sqr - e1_dot_e2 * e1_dot_e2 / (kEps + e1_norm_sqr); scalar_times_vec<dim>(1 / (denom + kEps), grad_w2, grad_w2); } } template <int dim, typename scalar_t> __device__ void GeneralizedTriangleProjection( const scalar_t* p, const scalar_t* v0, const scalar_t* e1, const scalar_t* e2, const scalar_t e1_dot_e2, const scalar_t e1_norm_sqr, const scalar_t e2_norm_sqr, const scalar_t* grad_w1, const scalar_t* grad_w2, // Below are results: scalar_t* result_dist, scalar_t* result_w1, scalar_t* result_w2) { scalar_t w1, w2; scalar_t p_minus_v0[dim]; // this array is unavoidable since we need to compute // this for every pair of query point and face bool degenerate = false; minus_vec<dim>(p, v0, p_minus_v0); if (grad_w1 == nullptr || grad_w2 == nullptr) { scalar_t b1 = dot_vec<dim>(e1, p_minus_v0); scalar_t b2 = dot_vec<dim>(e2, p_minus_v0); scalar_t det = e1_norm_sqr * e2_norm_sqr - e1_dot_e2 * e1_dot_e2; if (det < -kEps || det > kEps) { // Cramer's rule. 
w1 = (b1 * e2_norm_sqr - b2 * e1_dot_e2) / det; w2 = (b2 * e1_norm_sqr - b1 * e1_dot_e2) / det; } else { degenerate = true; } } else { w1 = dot_vec<dim>(grad_w1, p_minus_v0); w2 = dot_vec<dim>(grad_w2, p_minus_v0); } if (!degenerate && 0 <= 1 - w1 - w2 && 0 <= w1 && 0 <= w2) { scalar_t p_proj[dim]; { { scalar_t w1_e1[dim]; scalar_times_vec<dim>(w1, e1, w1_e1); scalar_times_vec<dim>(w2, e2, p_proj); add_vec<dim>(p_proj, w1_e1); } } *result_dist = distance_sqr_vec<dim>(p_minus_v0, p_proj); *result_w1 = w1; *result_w2 = w2; } else { // Project to three edges. scalar_t w1_tmp[3]; scalar_t w2_tmp[3]; w1_tmp[0] = clamp01(dot_vec<dim>(p_minus_v0, e1) / (kEps + e1_norm_sqr)); w2_tmp[0] = 0; w2_tmp[1] = clamp01(dot_vec<dim>(p_minus_v0, e2) / (kEps + e2_norm_sqr)); w1_tmp[1] = 0; { scalar_t numer = dot_vec<dim>(p_minus_v0, e2) - dot_vec<dim>(p_minus_v0, e1) - e1_dot_e2 + e1_norm_sqr; scalar_t denom = e1_norm_sqr + e2_norm_sqr - 2 * e1_dot_e2; w2_tmp[2] = clamp01(numer / (kEps + denom)); w1_tmp[2] = 1 - w2_tmp[2]; } scalar_t best_dist = FLT_MAX; int best_k = -1; for (int k = 0; k < 3; k++) { scalar_t p_proj[dim]; scalar_t w1_e1[dim]; scalar_times_vec<dim>(w1_tmp[k], e1, w1_e1); scalar_times_vec<dim>(w2_tmp[k], e2, p_proj); add_vec<dim>(p_proj, w1_e1); scalar_t dist = distance_sqr_vec<dim>(p_minus_v0, p_proj); if (dist < best_dist) { best_dist = dist; best_k = k; } } *result_dist = best_dist; *result_w1 = w1_tmp[best_k]; *result_w2 = w2_tmp[best_k]; } } template <int dim, typename scalar_t> __device__ void GeneralizedTetrahedronProjection( const scalar_t* p, // D const scalar_t* v0, // D const scalar_t* e_mat, // 3xD, const scalar_t* e_dot_mat, // 3x3 const scalar_t* e_dot_inv_mat, // 3x3 const scalar_t is_degenerate, // float, 1.0 if degenerate // Below are results: scalar_t* result_dist, // scalar scalar_t* result_weights // 3 ) { bool recurse = true; { scalar_t p_minus_v0[dim]; minus_vec<dim>(p, v0, p_minus_v0); scalar_t b[3]; for (int i = 0; i < 3; i++) { b[i] = 
dot_vec<dim>(&e_mat[i * dim], p_minus_v0); } if (is_degenerate < 0.5) { scalar_t weights[3]; mat_vec_mult<3>(e_dot_inv_mat, b, weights); scalar_t weight_op = 1 - weights[0] - weights[1] - weights[2]; if (weights[0] >= 0 && weights[1] >= 0 && weights[2] >= 0 && weight_op >= 0) { recurse = false; copy_vec<3>(result_weights, weights); scalar_t p_proj[dim]; zero_out_vec<dim>(p_proj); for (int i = 0; i < 3; i++) { scalar_t tmp[dim]; scalar_times_vec<dim>(result_weights[i], &e_mat[i * dim], tmp); add_vec<dim>(p_proj, tmp); } *result_dist = distance_sqr_vec<dim>(p, p_proj); } } } if (recurse) { // Calculate weights by projecting onto each of 4 faces. *result_dist = FLT_MAX; for (int i = 0; i < 4; i++) { scalar_t vc[dim]; scalar_t ecj[dim]; scalar_t eck[dim]; scalar_t ecj_dot_eck; scalar_t ecj_norm_sqr; scalar_t eck_norm_sqr; if (i < 3) { copy_vec<dim>(vc, v0); int j = i; int k = (i + 1) % 3; copy_vec<dim>(ecj, &e_mat[j * dim]); copy_vec<dim>(eck, &e_mat[k * dim]); ecj_dot_eck = e_dot_mat[j * 3 + k]; ecj_norm_sqr = e_dot_mat[j * 3 + j]; eck_norm_sqr = e_dot_mat[k * 3 + k]; } else { plus_vec<dim>(v0, &e_mat[2 * dim], vc); minus_vec<dim>(&e_mat[0 * dim], &e_mat[2 * dim], ecj); minus_vec<dim>(&e_mat[1 * dim], &e_mat[2 * dim], eck); ecj_dot_eck = dot_vec<dim>(ecj, eck); ecj_norm_sqr = dot_vec<dim>(ecj, ecj); eck_norm_sqr = dot_vec<dim>(eck, eck); } scalar_t cur_dist; scalar_t cur_weights[2]; GeneralizedTriangleProjection<dim>( p, vc, ecj, eck, ecj_dot_eck, ecj_norm_sqr, eck_norm_sqr, (const scalar_t*)nullptr, (const scalar_t*)nullptr, &cur_dist, &cur_weights[0], &cur_weights[1]); if (cur_dist < *result_dist) { *result_dist = cur_dist; zero_out_vec<3>(result_weights); if (i < 3) { result_weights[i] = cur_weights[0]; result_weights[(i + 1) % 3] = cur_weights[1]; } else { result_weights[0] = cur_weights[0]; result_weights[1] = cur_weights[1]; result_weights[2] = 1 - cur_weights[0] - cur_weights[1]; } } } } } template <int dim, typename scalar_t> __global__ void 
GeneralizedTriangleProjectionKernel( const scalar_t* __restrict__ points, // P x D const size_t num_faces, // Pre-computed values: const scalar_t* __restrict__ v0, // FxD, vertex 0 of faces const scalar_t* __restrict__ e1, // FxD, vertex 1 - vertex 0 const scalar_t* __restrict__ e2, // FxD const scalar_t* __restrict__ e1_dot_e2, // F const scalar_t* __restrict__ e1_norm_sqr, // F const scalar_t* __restrict__ e2_norm_sqr, // F const scalar_t* __restrict__ grad_w1, // FxD, dw1/dp on the face const scalar_t* __restrict__ grad_w2, // FxD // Results: scalar_t* __restrict__ result_dists, int* __restrict__ result_idxs, scalar_t* __restrict__ result_w1, scalar_t* __restrict__ result_w2) { extern __shared__ char shared_buf[]; scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[num_threads] size_t* min_tids = (size_t*)&min_dists[blockDim.x]; // size_t[num_threads] // Each block computing the projection of a single point. const size_t i = blockIdx.x; // Faces are divided evenly among the threads. const size_t tid = threadIdx.x; const scalar_t* p = &points[i * dim]; // point to project scalar_t min_dist = FLT_MAX; size_t min_idx = 0; scalar_t min_w1; scalar_t min_w2; for (int j = tid; j < num_faces; j += blockDim.x) { scalar_t dist, w1, w2; GeneralizedTriangleProjection<dim, scalar_t>( p, &v0[j * dim], &e1[j * dim], &e2[j * dim], e1_dot_e2[j], e1_norm_sqr[j], e2_norm_sqr[j], &grad_w1[j * dim], &grad_w2[j * dim], &dist, &w1, &w2); if (dist < min_dist) { min_dist = dist; min_idx = j; min_w1 = w1; min_w2 = w2; } } min_dists[tid] = min_dist; min_tids[tid] = tid; __syncthreads(); ReduceMin(min_dists, min_tids, tid); __syncthreads(); // Finally thread with min_dist writes the result to the output. 
if (tid == min_tids[0]) { result_dists[i] = min_dist; // squared minimum distance result_idxs[i] = min_idx; assert(min_idx < num_faces); result_w1[i] = min_w1; result_w2[i] = min_w2; } } template <int dim, typename scalar_t> __global__ void GeneralizedTetrahedronProjectionKernel( const scalar_t* __restrict__ points, // P x D const size_t num_tets, // Pre-computed values: const scalar_t* __restrict__ v0, // TxD, vertex 0 of faces const scalar_t* __restrict__ e_mat, // Tx3xD, vertex i - vertex 0 const scalar_t* __restrict__ e_dot_mat, // Tx3x3 const scalar_t* __restrict__ e_dot_inv_mat, // Tx3x3 const scalar_t* __restrict__ is_degenerate, // T // Results: scalar_t* __restrict__ result_dists, int* __restrict__ result_idxs, scalar_t* __restrict__ result_weights // Px3 ) { extern __shared__ char shared_buf[]; scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[num_threads] size_t* min_tids = (size_t*)&min_dists[blockDim.x]; // size_t[num_threads] // Each block computing the projection of a single point. const size_t i = blockIdx.x; // Faces are divided evenly among the threads. const size_t tid = threadIdx.x; const scalar_t* p = &points[i * dim]; // point to project scalar_t min_dist = FLT_MAX; size_t min_idx = 0; scalar_t min_weights[3]; for (int j = tid; j < num_tets; j += blockDim.x) { scalar_t dist; scalar_t weights[3]; GeneralizedTetrahedronProjection<dim, scalar_t>( p, &v0[j * dim], &e_mat[j * 3 * dim], &e_dot_mat[j * 3 * 3], &e_dot_inv_mat[j * 3 * 3], is_degenerate[j], &dist, weights); if (dist < min_dist) { min_dist = dist; min_idx = j; copy_vec<3>(min_weights, weights); } } min_dists[tid] = min_dist; min_tids[tid] = tid; __syncthreads(); ReduceMin(min_dists, min_tids, tid); // Finally thread with min_dist writes the result to the output. 
if (tid == min_tids[0]) { result_dists[i] = min_dist; // squared minimum distance result_idxs[i] = min_idx; assert(min_idx < num_tets); copy_vec<3>(&result_weights[i * 3], min_weights); } } } // namespace template <int dim> std::vector<torch::Tensor> ComputeGeneralizedTriangleProjection( torch::Tensor points, const TriangularProjectionInfo& info) { CHECK_INPUT(points); TORCH_CHECK(points.size(1) == dim); TORCH_CHECK(info.dim == dim); TORCH_CHECK(points.dtype() == torch::kFloat32 || points.dtype() == torch::kFloat64); TORCH_CHECK(points.dtype() == info.v0.dtype()); at::cuda::CUDAGuard device_guard{points.device()}; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int num_points = points.size(0); int num_faces = info.num_faces; torch::Tensor result_dists = torch::zeros({num_points}, points.options()).contiguous(); torch::Tensor result_idxs = torch::zeros({num_points}, points.options().dtype(torch::kInt32)) .contiguous(); torch::Tensor result_w1 = torch::zeros({num_points}, points.options()).contiguous(); torch::Tensor result_w2 = torch::zeros({num_points}, points.options()).contiguous(); size_t num_threads = 128; dim3 num_blocks(num_points); size_t shared_size = num_threads * ((points.dtype() == torch::kFloat32 ? 
sizeof(float) : sizeof(double)) + sizeof(size_t)); AT_DISPATCH_FLOATING_TYPES( points.scalar_type(), "ComputeGeneralizedTriangleProjection", [&] { GeneralizedTriangleProjectionKernel<dim, scalar_t> <<<num_blocks, num_threads, shared_size, stream>>>( points.contiguous().data_ptr<scalar_t>(), static_cast<size_t>(num_faces), info.v0.contiguous().data_ptr<scalar_t>(), info.e1.contiguous().data_ptr<scalar_t>(), info.e2.contiguous().data_ptr<scalar_t>(), info.e1_dot_e2.contiguous().data_ptr<scalar_t>(), info.e1_norm_sqr.contiguous().data_ptr<scalar_t>(), info.e2_norm_sqr.contiguous().data_ptr<scalar_t>(), info.grad_w1.contiguous().data_ptr<scalar_t>(), info.grad_w2.contiguous().data_ptr<scalar_t>(), result_dists.data_ptr<scalar_t>(), result_idxs.data_ptr<int>(), result_w1.data_ptr<scalar_t>(), result_w2.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return {result_dists, result_idxs.to(torch::kInt64), result_w1, result_w2}; } template <int dim> std::vector<torch::Tensor> ComputeGeneralizedTetrahedronProjection( torch::Tensor points, torch::Tensor vertices, torch::Tensor tets) { CHECK_INPUT(points); CHECK_INPUT(vertices); CHECK_INPUT(tets); TORCH_CHECK(points.size(1) == dim); TORCH_CHECK(vertices.size(1) == dim); TORCH_CHECK(points.dtype() == torch::kFloat32 || points.dtype() == torch::kFloat64); TORCH_CHECK(tets.dtype() == torch::kInt64); // torch requires 64-bit int for indexing TORCH_CHECK(points.dtype() == vertices.dtype()); at::cuda::CUDAGuard device_guard{points.device()}; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Pre-compute reusable values. 
using namespace torch::indexing; std::vector<torch::Tensor> v_list; for (int i = 0; i < 4; i++) { v_list.push_back(vertices.index({tets.index({Slice(), i}), Slice()})); } std::vector<torch::Tensor> e_list; for (int i = 1; i < 4; i++) { e_list.push_back(v_list[i] - v_list[0]); } std::vector<torch::Tensor> e_dot_list; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { e_dot_list.push_back((e_list[i] * e_list[j]).sum(-1)); } } auto e_mat = torch::stack(e_list, 1); // Tx3xD auto e_dot_mat = torch::stack(e_dot_list, 1).reshape({-1, 3, 3}); // Tx3x3 auto e_dot_mat_det = torch::linalg_det(e_dot_mat); // T auto is_degenerate = (e_dot_mat_det.abs() < kEps).to(points.dtype()); // T auto e_dot_inv_mat = torch::linalg_pinv(e_dot_mat); // Tx3x3 size_t num_points = points.size(0); size_t num_tets = tets.size(0); torch::Tensor result_dists = torch::zeros({static_cast<int>(num_points)}, points.options()) .contiguous(); torch::Tensor result_idxs = torch::zeros({static_cast<int>(num_points)}, tets.options().dtype(torch::kInt32)) .contiguous(); torch::Tensor result_weights = torch::zeros({static_cast<int>(num_points), 3}, points.options()) .contiguous(); // w1, w2, w3 of barycentric coordinates size_t num_threads = 128; dim3 num_blocks(num_points); size_t shared_size = num_threads * ((points.dtype() == torch::kFloat32 ? 
sizeof(float) : sizeof(double)) + sizeof(size_t)); AT_DISPATCH_FLOATING_TYPES( points.scalar_type(), "ComputeGeneralizedTetrahedronProjection", [&] { GeneralizedTetrahedronProjectionKernel<dim, scalar_t> <<<num_blocks, num_threads, shared_size, stream>>>( points.contiguous().data_ptr<scalar_t>(), num_tets, v_list[0].contiguous().data_ptr<scalar_t>(), e_mat.contiguous().data_ptr<scalar_t>(), e_dot_mat.contiguous().data_ptr<scalar_t>(), e_dot_inv_mat.contiguous().data_ptr<scalar_t>(), is_degenerate.contiguous().data_ptr<scalar_t>(), result_dists.data_ptr<scalar_t>(), result_idxs.data_ptr<int>(), result_weights.data_ptr<scalar_t>()); }); AT_CUDA_CHECK(cudaGetLastError()); return {result_dists, result_idxs.to(torch::kInt64), result_weights}; } // Explicit instantiations. template std::vector<torch::Tensor> ComputeGeneralizedTriangleProjection<3>( torch::Tensor points, const TriangularProjectionInfo& info); template std::vector<torch::Tensor> ComputeGeneralizedTriangleProjection<8>( torch::Tensor points, const TriangularProjectionInfo& info); template std::vector<torch::Tensor> ComputeGeneralizedTetrahedronProjection<3>( torch::Tensor points, torch::Tensor vertices, torch::Tensor tets); } // namespace geomlib
the_stack
#include "WorkAndObjectMatch.h" // Host 全局常量:_scalModulus(扩缩系数) // 对 TEST 图像进行扩缩时使用的扩缩系数 // 暂时未实现扩缩操作,先注释掉 //static const float _scalModulus[] = { 0.80f, 0.85f, 0.90f, 0.95f, 1.0f, // 1.05f, 1.10f, 1.15f, 1.20f }; // Host 全局常量:_scalModulusCount(扩缩系数的数量) // 对 TEST 图像进行扩缩的扩缩系数的数量 static const int _scalModulusCount = 9; // Host 函数:_shrink(对一组图像分别进行 1 / N 缩小) // 对给定的一组图像进行 1 / N 缩小 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返回 // NO_ERROR _shrink( Image **inimg, // 输入的一组图像 Image **outimg, // 输出的一组经过 1 / N 缩小的图像 int imgcount, // 输入图像的数量 int tiems // 需要缩小的图像的倍数 ); // Host 函数:_deleteBigImageArray(删除一个大图片数组) // 删除一个存放图片的大数组 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返回 NO_ERROR _deleteBigImageArray( Image **img, // 存放图片的大数组 int imgcount // 数组的大小 ); // Host 函数:_createBigImageArray(创建一个大图片的数组) // 创建一个存放图片的大的数组 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返回 // NO_ERROR _createBigImageArray( Image ***img, // 存放图片的数组的指针 int imgcount // 需要创建的数组的大小 ); // Host 函数:_getBestMatchTestIndex(获取相关系数最大的结果) // 获取相关系数最大的结果 static __host__ int // 返回值:最大相关系数的索引 _getBestMatchTestIndex( MatchRes *res, // 匹配得到的一组结果 int rescount // 结果的数量 ); // Host 函数:_scalAndProjective(对图像进行扩缩和射影变换) // 对图像进行不同的扩缩和射影变换,扩缩系数由 _scalModulus 指定 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返回 // NO_ERROR _scalAndProjective( Image *inimg, // 输入图像 Image **outimg, // 经过不同扩缩和射影变换得到的一组输出图像 int imgcount // 输出图像的个数 ); // Host 函数:_createAffineImages(生成 2 * anglecount 个回转图像) // 对 TEST 图像生成 2 * angleCount 个角度的回转图像,回转角的范围是 // angle - 0.2 * anglecount ~ angle + 0.2 * anglecount static __host__ int // 返回值:函数是否正确执行,若函数正确执行, // 返回 NO_ERROR _createAffineImages( Image *test, // 输入图像 int twx, int twy, // 回转中心的横坐标和纵坐标 float angle, // 基准回转角 int rwidth, int rheight, // 回转后的图像的宽和高 int anglecount, // 需要回转的角度的数量 Image **rotatetest // 输出图像,回转后的图像 ); // Host 函数:_shrink(对一组图像分别进行 1 / N 缩小) static __host__ int _shrink(Image **inimg, Image **outimg, int imgcount, int times) { // 判断 inimg 和 outimg 是否为空,若为空,则返回错误 if (inimg == NULL || outimg == 
NULL) return NULL_POINTER; int errcode; // 局部变量,错误码 // 判断参数是否合法,若不合法,直接返回错误 if (imgcount <= 0 || times <= 0) return INVALID_DATA; // 定义一个用来进行 1 / N 缩小操作的对象 DownSampleImage shrink(times); // 依次对每一输入图像进行 1 / N 缩小操作 for (int i = 0; i < imgcount; i++) { // 使用概率法进行 1 / N 缩小 errcode = shrink.probabilityDownSImg(inimg[i], outimg[i]); // 若 1 / N 缩小操作失败,则直接返回 if (errcode != NO_ERROR) return errcode; } // 处理完毕,返回 NO_ERROR return NO_ERROR; } // Host 函数:_deleteBigImageArray(删除一个大图片数组) static __host__ int _deleteBigImageArray(Image **img, int imgcount) { // 判断 img 是否为空,若为空,则返回错误 if (img == NULL) return NULL_POINTER; // 依次删除数组里的每一张图像 for (int i = 0; i < imgcount; i++) ImageBasicOp::deleteImage(img[i]); // 删除存放图片的数组 delete [] img; // 处理完毕,返回 NO_ERROR return NO_ERROR; } // Host 函数:_createBigImageArray(创建一个大图片的数组) static __host__ int _createBigImageArray(Image ***img, int imgcount) { // 判断 img 是否为空,若为空,则返回错误 if (img == NULL) return NULL_POINTER; int errcode; // 局部变量,错误码 // 为数组申请空间 *img = new Image *[imgcount]; // 若申请空间失败,返回错误 if (*img == NULL) return OUT_OF_MEM; // 依次创建指定数量的图片 for (int i = 0; i < imgcount; i++) { errcode = ImageBasicOp::newImage(&((*img)[i])); // 若创建图片失败,删除先前创建的图片,然后返回 if (errcode != NO_ERROR) { _deleteBigImageArray(*img, imgcount); return errcode; } } // 处理完毕,返回 NO_ERROR return NO_ERROR; } // Host 函数:_getBestMatchTestIndex(获取相关系数最大的结果) static __host__ int _getBestMatchTestIndex(MatchRes *res, int rescount) { // 判断 res 是否为空,若为空,则返回错误 if (res == NULL) return NULL_POINTER; // 默认相关系数最大的位置为第 0 个结果 int maxindex = 0; // 记录最大相关系数的值,默认为第 0 个结果的相关系数 float max = res[maxindex].coefficient; // 依次和所有其他的结果比较 for (int i = 1; i < rescount; i++) { // 若发现当前结果的相关系数比记录的最大值大,则将当前的相关系数赋值 // 给 max,同时记录当前结果的索引 if (max < res[i].coefficient) { max = res[i].coefficient; maxindex = i; } } // 返回具有最大相关系数的结果的索引 return maxindex; } // Host 函数:_scalAndProjective(对图像进行扩缩和射影变换) static __host__ int _scalAndProjective(Image *inimg, Image **outimg, int imgcount) { // 判断 inimg 和 outimg 是否为空,若为空,则返回错误 if 
(inimg == NULL || outimg == NULL) return NULL_POINTER; int errcode; // 局部变量,错误码 // 此处暂时未实现扩缩和射影变换,只是单纯的拷贝图片 for (int i = 0; i < imgcount; i++) { errcode = ImageBasicOp::copyToHost(inimg, outimg[i]); if (errcode != NO_ERROR) { return errcode; } } // 处理完毕,返回 NO_ERROR return NO_ERROR; } // Host 函数:_createAffineImages(生成 2 * anglecount 个回转图像) static __host__ int _createAffineImages(Image *test, int twx, int twy, float angle, int rwidth, int rheight, int anglecount, Image **rotatetest) { // 判断 test 和 rotatetest 是否为空,若为空,则返回错误 if (test == NULL || rotatetest == NULL) return NULL_POINTER; int errcode; // 局部变量,错误码 // 检查参数是否合法,若不合法,直接返回错误 if (test == NULL || rotatetest == NULL) return NULL_POINTER; // 定义一个进行回转操作的对象 AffineTrans affine; // 设置旋转前的平移向量 affine.setX(test->width / 2 - twx); affine.setY(test->height / 2 - twy); // 生成 2 * anglecount 个回转图像,角度分别为 // angle - 0.2 * anglecount ~ angle + 0.2 * anglecount for (int i = 0; i < 2 * anglecount; i++) { // 创建一个临时图片 Image *t; errcode = ImageBasicOp::newImage(&t); // 如果申请空间失败,则直接返回错误 if (errcode != NO_ERROR) return errcode; // 设置回旋的角度为 errcode = affine.setAlpha(angle + 0.2 * (i - anglecount)); // 若设置失败,则直接返回错误 if (errcode != NO_ERROR) return errcode; // 对输入图像进行回转 errcode = affine.rotateShift(test, t); // 若回转失败,则直接返回错误 if (errcode != NO_ERROR) return errcode; // 设置回转后的图像的子图的大小 t->roiX1 = t->width / 2 - rwidth / 2; t->roiY1 = t->height / 2 - rheight / 2; t->roiX2 = t->roiX1 + rwidth; t->roiY2 = t->roiY1 + rheight; // 将子图从临时图像中 clip 出来 RoiCopy copy; errcode = copy.roiCopyAtDevice(t, rotatetest[i]); // 删除临时图像 ImageBasicOp::deleteImage(t); // 若拷贝子图失败,则直接返回 if (errcode != NO_ERROR) return errcode; } // 处理完毕,直接返回 return NO_ERROR; } // 成员方法: 获取 TEST 图像中的 WORK 图像 __host__ int WorkAndObjectMatch::getMatchWork(Image *test, Image *work) { // 检查 test 和 res 是否为空,若为空,则返回错误 if (test == NULL || work == NULL) return NULL_POINTER; int errcode; // 局部变量,错误码 // 定义一个大的图像数组,为后面的操作提供所需要的图像空间 Image **bigimagesarray; // 标记 bigimagesarray 数组的大小 int 
bigimagessize; // 图像数组的游标 Image **cursor; // 计算图像数组的大小 bigimagessize = normalWork->count + 2 * _scalModulusCount + 2 * angleCount; // 创建指定大小的图像数组,为后面的操作提供图像空间 errcode = _createBigImageArray(&bigimagesarray, bigimagessize); // 若图像数组创建失败,则直接返回 if (errcode != NO_ERROR) return errcode; // 初始化游标为当前图像地址 cursor = bigimagesarray; // 对标准的 WORK 图像进行 1 / 8 缩小 // 存储 1 / 8 缩小的标准 WORK 图像 Image **shrinknormalwork; // 从图像数组中获取空间 shrinknormalwork = cursor; // 更新游标的位置 cursor += normalWork->count; // 对一组标准的 WORK 图像进行 1 / 8 图像缩小 errcode = _shrink(normalWork->images, shrinknormalwork, normalWork->count, 8); // 若 1 / 8 图像缩小操作失败,则删除图像数组,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return errcode; } // 对 TEST 图像进行扩缩和射影变换 // 存储 TEST 图像进行不同扩缩系数的扩缩和射影变换得到的图像 Image **scalprojecttest; // 从图像数组中获取空间 scalprojecttest = cursor; // 更新游标的位置 cursor += _scalModulusCount; // 对 TEST 图像进行不同的扩缩和射影变换 errcode = _scalAndProjective(test, scalprojecttest, _scalModulusCount); // 若扩缩和射影变换操作失败,则删除图像数组的空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return errcode; } // 对 TEST 的各个变形的图像进行 1 / 8 缩小 // 存储对 TEST 图像的各个变形的图像进行 1 / 8 缩小后的图像 Image **shrinkscalprojecttest; // 从图像数组中获取空间 shrinkscalprojecttest = cursor; // 更新游标的位置 cursor += _scalModulusCount; // 对变形的各个 TEST 图像进行 1 / 8 缩小操作 errcode = _shrink(scalprojecttest, shrinkscalprojecttest, _scalModulusCount, 8); // 如果 1 / 8 操作失败,则删除图像数组,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return errcode; } // 用 1 / 8 缩小的,具有不同回旋角的 WORK 图像分别在 1 / 8 缩小的 TEST 图像 // 进行匹配 // 申请一段内存空间,用来存储匹配得到的结果 MatchRes *workmatchres = new MatchRes[_scalModulusCount]; // 若申请内存失败,则删除图像数组,然后返回错误 if (workmatchres == NULL) { _deleteBigImageArray(bigimagesarray, bigimagessize); return OUT_OF_MEM; } // 定义一个用来进行图像匹配的对象 ImageMatch match; // 设置旋转表 match.setRotateTable(normalWork->rotateTable); // 设置摄动范围的宽 match.setDWidth(normalWork->dWidth); // 设置摄动范围的高 
match.setDHeight(normalWork->dHeight); // 设置摄动中心的横坐标 match.setDX(normalWork->dX); // 设置摄动中心的纵坐标 match.setDY(normalWork->dY); // 设置匹配需要的 TEMPLATE 图像 errcode = match.setTemplateImage(shrinknormalwork, normalWork->count); // 如果设置 TEMPLATE 图像失败,则释放先前申请的内存空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); delete [] workmatchres; return errcode; } // 用 1 / 8 缩小的,具有不同回旋角的 WORK 图像分别在 1 / 8 缩小的经过扩缩和 // 摄影变换 TEST 图像进行匹配 for (int i = 0; i < _scalModulusCount; i++) { errcode = match.imageMatch(shrinkscalprojecttest[i], &workmatchres[i], NULL); // 若匹配操作失败,则返回释放先前申请的空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); delete [] workmatchres; return errcode; } } // 找到最佳匹配的 TEST 图像的索引 int besttestindex = _getBestMatchTestIndex(workmatchres, _scalModulusCount); // 获取扩缩和摄影变换后, 1 / 8 缩小前的最佳匹配的 TEST 图像 Image *matchtest = scalprojecttest[besttestindex]; // 对扩缩和摄影变换后,缩小前的最佳匹配的 TEST 图像生成 2 * angleCount 个 // 角度的回转图像 // 用来存储得到的 2 * angleCount 个回转角的回转图像 Image **rotatetest; // 从图像数组中获取空间 rotatetest = cursor; // 更新游标 cursor += 2 * angleCount; // 计算 1 / 8 缩小前在 TEST 图像的匹配中心,由 1 / 8 缩小的 TEST 图像匹配 // 得到的最佳匹配中心乘 8 得到 int twx = workmatchres[besttestindex].matchX * 8; int twy = workmatchres[besttestindex].matchY * 8; // 获取 1 / 8 缩小的 TEST 图像匹配得到旋转角 float angle = workmatchres[besttestindex].angle; // 设置图像回转后的大小,这里设置为标准 WORK 图像的 1.5 倍 int rwidth = normalWork->images[0]->width * 3 / 2; int rheight = normalWork->images[0]->height * 3 / 2; // 删除先前申请的用来记录匹配得到的结果的空间 delete [] workmatchres; // 创建 2 * angleCount 个角度的回转图像 errcode = _createAffineImages(matchtest, twx, twy, angle, rwidth, rheight, angleCount, rotatetest); // 如果创建失败,则释放图像数组的空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return NO_ERROR; } // 分别用标准 WORK 图像对 2 * angleCount 个角度的回转图像进行匹配 // 设置旋转表,由于之前已经对图像进行了回转,所以这里旋转角设置为 0 RotateTable table(0.0f, 0.0f, 0.2f, rwidth * 2, rheight * 2); // 创建用来进行图像匹配操作的对象 ImageMatch rmatch(&table, 
rwidth, rheight, rwidth / 2, rheight / 2, 3, 0, 0, 0, 0); // 设置匹配的 TEMPLATE 图像 errcode = rmatch.setTemplateImage(normalWork->images, normalWork->count); // 如果设置失败,释放图像数组空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return errcode; } // 申请用来存储匹配得到的结果的空间 workmatchres = new MatchRes[2 * angleCount]; // 分别用标准 WORK 图像对 2 * angleCount 个角度的回转图像进行匹配 for (int i = 0; i < 2 * angleCount; i++) { errcode = rmatch.imageMatch(rotatetest[i], &workmatchres[i], NULL); // 如果匹配失败,则释放之前申请的空间,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); delete [] workmatchres; return errcode; } } // 找出 rotatetest 中的最大匹配的图像的索引 int bestrotatetestindex = _getBestMatchTestIndex(workmatchres, 2 * angleCount); // 获取最佳匹配的图像 Image *matchrotatetest = rotatetest[bestrotatetestindex]; // 计算最佳匹配图像的回转角 float bestangle = angle + 0.2 * (bestrotatetestindex - angleCount); // 获取最佳匹配的中心 int cx = workmatchres[bestrotatetestindex].matchX; int cy = workmatchres[bestrotatetestindex].matchY; // 删除之前申请的存放匹配结果的内存空间 delete [] workmatchres; // 对得到的最佳匹配的 rotatetest 图像进行方向矫正处理 // 计算回转的移动向量 int tx = rwidth / 2 - cx; int ty = rheight / 2 - cy; // 定义用来进行回转操作的对象 AffineTrans aff(AFFINE_SOFT_IPL, tx, ty, -bestangle); // 对 rotatetest 图像进行方向矫正处理 errcode = aff.rotateShift(matchrotatetest, work); // 如果回转操作失败,则删除图像数组,然后返回错误 if (errcode != NO_ERROR) { _deleteBigImageArray(bigimagesarray, bigimagessize); return errcode; } // 删除图像数组 _deleteBigImageArray(bigimagesarray, bigimagessize); // 处理完毕,返回 NO_ERROR return NO_ERROR; } // 成员方法:workAndObjectMatch(进行 WORK and OBJECT 进行匹配操作) __host__ int WorkAndObjectMatch::workAndObjectMatch(Image *test, MatchRes *res, int rescount) { // 检查 test 和 res 是否为空,若为空,则返回错误 if (test == NULL || res == NULL) return NULL_POINTER; // 检查 rescount 是否合法,若不合法,则返回错误 if (rescount <= 0) return INVALID_DATA; // 检查 normalWork 和 objects 是否为空,若为空,则返回错误 if (normalWork == NULL || objects == NULL) return NULL_POINTER; // 检查标准 WORK 
图像的数量是否合法,若不合法,直接返回错误 if (normalWork->count <= 0) return INVALID_DATA; int errcode; // 局部变量,错误码 Image *work; // 局部变量,TEST 图像中的 WORK 图像 // 为 work 申请空间 errcode = ImageBasicOp::newImage(&work); // 若失败,则直接返回错误 if (errcode != NO_ERROR) return errcode; // 获取 TEST 图片中的 WORK 图片 errcode = getMatchWork(test, work); // 若获取失败,则释放 work 图像,然后返回错误 if (errcode != NO_ERROR) { ImageBasicOp::deleteImage(work); return errcode; } // 计算 objectCount 和 rescount 的最小值,用来指定匹配的次数 int objectcount = objectCount < rescount ? objectCount : rescount; // 分别用各个 OBJECT 图像对 work 图像进行匹配 for (int i = 0; i < objectcount; i++) { // 定义一个用来匹配的对象 ImageMatch match; // 设置匹配的摄动范围的宽 match.setDWidth(objects[i].dWidth); // 设置匹配的摄动范围的高 match.setDHeight(objects[i].dHeight); // 设置摄动中心的横坐标 match.setDX(objects[i].dX); // 设置摄动中心的纵坐标 match.setDY(objects[i].dY); // 设置旋转表 match.setRotateTable(objects[i].rotateTable); // 设置 TEMPLATE 图像 errcode = match.setTemplateImage(objects[i].images, objects[i].count); // 若设置失败,则释放 work 图像的空间,然后返回错误 if (errcode != NO_ERROR) { ImageBasicOp::deleteImage(work); return errcode; } // 用 OBJECT 图像对 work 图像进行匹配 errcode = match.imageMatch(work, &res[i], NULL); // 若匹配发生错误,则释放 work 图像的空间,然后返回错误 if (errcode != NO_ERROR) { ImageBasicOp::deleteImage(work); return errcode; } } // 释放 work 图像的空间 ImageBasicOp::deleteImage(work); // 处理完毕,返回 NO_ERROR return NO_ERROR; }
the_stack
#pragma once #include <gunrock/util/array_utils.cuh> #include <gunrock/graph/graph_base.cuh> #include <gunrock/graph/coo.cuh> #include <gunrock/util/binary_search.cuh> namespace gunrock { namespace graph { /** * @brief CSC data structure which uses Compressed Sparse Column * format to store a graph. It is a compressed way to present * the graph as a sparse matrix. * * @tparam VertexT Vertex identifier type. * @tparam SizeT Graph size type. * @tparam ValueT Associated value type. */ template <typename _VertexT = int, typename _SizeT = _VertexT, typename _ValueT = _VertexT, GraphFlag _FLAG = GRAPH_NONE | HAS_CSC, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault, bool VALID = true> struct Csc : public GraphBase<_VertexT, _SizeT, _ValueT, _FLAG | HAS_CSC, cudaHostRegisterFlag> { typedef _VertexT VertexT; typedef _SizeT SizeT; typedef _ValueT ValueT; static const GraphFlag FLAG = _FLAG | HAS_CSC; static const util::ArrayFlag ARRAY_FLAG = util::If_Val<(FLAG & GRAPH_PINNED) != 0, (FLAG & ARRAY_RESERVE) | util::PINNED, FLAG & ARRAY_RESERVE>::Value; typedef GraphBase<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> BaseGraph; typedef Csc<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag> CscT; typedef typename util::If<(FLAG & HAS_EDGE_VALUES) != 0, ValueT, util::NullType>::Type EdgeValueT; typedef typename util::If<(FLAG & HAS_NODE_VALUES) != 0, ValueT, util::NullType>::Type NodeValueT; // Column indices corresponding to all the // non-zero values in the sparse matrix util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag> row_indices; // List of indices where each row of the // sparse matrix starts util::Array1D<SizeT, SizeT, ARRAY_FLAG, cudaHostRegisterFlag> column_offsets; // typedef util::Array1D<SizeT, ValueT, ARRAY_FLAG, // cudaHostRegisterFlag> Array_ValueT; // typedef util::NullArray<SizeT, ValueT, ARRAY_FLAG, // cudaHostRegisterFlag> Array_NValueT; // List of values attached to edges in the graph // typename util::If<(FLAG & 
HAS_EDGE_VALUES) != 0, // Array_ValueT, Array_NValueT >::Type edge_values; util::Array1D<SizeT, EdgeValueT, ARRAY_FLAG, cudaHostRegisterFlag> edge_values; // List of values attached to nodes in the graph // typename util::If<(FLAG & HAS_NODE_VALUES) != 0, // Array_ValueT, Array_NValueT >::Type node_values; // Array_ValueT node_values; util::Array1D<SizeT, NodeValueT, ARRAY_FLAG, cudaHostRegisterFlag> node_values; /** * @brief CSC Constructor * * @param[in] pinned Use pinned memory for CSC data structure * (default: do not use pinned memory) */ Csc() : BaseGraph() { column_offsets.SetName("column_offsets"); row_indices.SetName("row_indices"); edge_values.SetName("edge_values"); node_values.SetName("node_values"); } /** * @brief CSC destructor */ __host__ __device__ ~Csc() { // Release(); } /** * @brief Deallocates CSC graph */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(column_offsets.Release(target)); GUARD_CU(row_indices.Release(target)); GUARD_CU(node_values.Release(target)); GUARD_CU(edge_values.Release(target)); GUARD_CU(BaseGraph ::Release(target)); return retval; } /** * @brief Allocate memory for CSC graph. 
* * @param[in] nodes Number of nodes in COO-format graph * @param[in] edges Number of edges in COO-format graph */ cudaError_t Allocate(SizeT nodes, SizeT edges, util::Location target = GRAPH_DEFAULT_TARGET) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseGraph ::Allocate(nodes, edges, target)); GUARD_CU(column_offsets.Allocate(nodes + 1, target)); GUARD_CU(row_indices.Allocate(edges, target)); GUARD_CU(node_values.Allocate(nodes, target)); GUARD_CU(edge_values.Allocate(edges, target)); return retval; } cudaError_t Move(util::Location source, util::Location target, cudaStream_t stream = 0) { cudaError_t retval = cudaSuccess; // SizeT invalid_size = util::PreDefinedValues<SizeT>::InvalidValue; GUARD_CU(BaseGraph ::Move(source, target, stream)); GUARD_CU(column_offsets.Move(source, target, this->nodes + 1, 0, stream)); GUARD_CU(row_indices.Move(source, target, this->edges, 0, stream)); GUARD_CU(node_values.Move(source, target, this->nodes, 0, stream)); GUARD_CU(edge_values.Move(source, target, this->edges, 0, stream)); return retval; } template <typename VertexT_in, typename SizeT_in, typename ValueT_in, GraphFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t FromCsc(Csc<VertexT_in, SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { cudaError_t retval = cudaSuccess; if (target == util::LOCATION_DEFAULT) target = source.column_offsets.GetSetted() | source.column_offsets.GetAllocated(); // if (retval = BaseGraph::Set(source)) // return retval; this->nodes = source.nodes; this->edges = source.edges; this->directed = source.directed; GUARD_CU(Allocate(source.nodes, source.edges, target)); GUARD_CU(column_offsets.Set(source.column_offsets, this->nodes + 1, target, stream)); GUARD_CU(row_indices.Set(source.row_indices, this->edges, target, stream)); GUARD_CU(edge_values.Set(source.edge_values, this->edges, target, stream)); 
GUARD_CU(node_values.Set(source.node_values, this->nodes, target, stream)); return retval; } /** * @brief Build CSC graph from COO graph, sorted or unsorted * * @param[in] output_file Output file to dump the graph topology info * @param[in] coo Pointer to COO-format graph * @param[in] coo_nodes Number of nodes in COO-format graph * @param[in] coo_edges Number of edges in COO-format graph * @param[in] ordered_rows Are the rows sorted? If not, sort them. * @param[in] undirected Is the graph directed or not? * @param[in] reversed Is the graph reversed or not? * @param[in] quiet Don't print out anything. * * Default: Assume rows are not sorted. */ template <typename GraphT> cudaError_t FromCoo(GraphT &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, // bool ordered_rows = false, // bool undirected = false, // bool reversed = false, bool quiet = false) { typedef typename GraphT::CooT CooT; // typedef Coo<VertexT_in, SizeT_in, ValueT_in, FLAG_in, // cudaHostRegisterFlag_in> CooT; util::PrintMsg( "Converting " + std::to_string(source.CooT::nodes) + " vertices, " + std::to_string(source.CooT::edges) + (source.CooT::directed ? " directed" : " undirected") + " edges (" + (source.CooT::edge_order == BY_COLUMN_ASCENDING ? 
" ordered" : "unordered") + " tuples) to CSC format...", !quiet, false); time_t mark1 = time(NULL); cudaError_t retval = cudaSuccess; if (target == util::LOCATION_DEFAULT) target = source.CooT::edge_pairs.GetSetted() | source.CooT::edge_pairs.GetAllocated(); /*if (retval = BaseGraph:: template Set<typename CooT::CooT>((typename CooT::CooT)source)) return retval; */ this->nodes = source.CooT::nodes; this->edges = source.CooT::edges; this->directed = source.CooT::directed; GUARD_CU(Allocate(source.CooT::nodes, source.CooT::edges, target)); // Sort COO by row GUARD_CU(source.CooT::Order(BY_COLUMN_ASCENDING, target, stream)); // source.CooT::Display(); // assign row_indices GUARD_CU(row_indices.ForEach( source.CooT::edge_pairs, [] __host__ __device__(VertexT & row_index, const typename CooT::EdgePairT &edge_pair) { row_index = edge_pair.x; }, this->edges, target, stream)); // assign edge_values if (FLAG & HAS_EDGE_VALUES) { GUARD_CU(edge_values.ForEach( source.CooT::edge_values, [] __host__ __device__(EdgeValueT & edge_value, const typename CooT::ValueT &edge_value_in) { edge_value = edge_value_in; }, this->edges, target, stream)); } if (FLAG & HAS_NODE_VALUES) { GUARD_CU(node_values.ForEach( source.CooT::node_values, [] __host__ __device__(NodeValueT & node_value, const typename CooT::ValueT &node_value_in) { node_value = node_value_in; }, this->nodes, target, stream)); } // assign column_offsets SizeT edges = this->edges; SizeT nodes = this->nodes; auto column_edge_compare = [] __host__ __device__( const typename CooT::EdgePairT &edge_pair, const VertexT &column) { return edge_pair.y < column; }; GUARD_CU(column_offsets.ForAll( source.CooT::edge_pairs, [nodes, edges, column_edge_compare] __host__ __device__( SizeT * column_offsets, const typename CooT::EdgePairT *edge_pairs, const VertexT &column) { if (column <= edge_pairs[0].y) column_offsets[column] = 0; else if (column < nodes) { auto pos = util::BinarySearch_LeftMost( column, edge_pairs, (SizeT)0, edges - 1, 
column_edge_compare, [](const typename CooT::EdgePairT &pair, const VertexT &column) { return (pair.y == column); }); while (pos < edges && column > edge_pairs[pos].y) pos++; column_offsets[column] = pos; } else column_offsets[column] = edges; }, this->nodes + 1, target, stream)); time_t mark2 = time(NULL); util::PrintMsg("Done (" + std::to_string(mark2 - mark1) + "s).", !quiet); return retval; } template <typename GraphT> cudaError_t FromCsr(GraphT &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { typedef typename GraphT::CsrT CsrT; typedef Coo<VertexT, SizeT, ValueT, FLAG | HAS_COO, cudaHostRegisterFlag> CooT; cudaError_t retval = cudaSuccess; CooT coo; GUARD_CU(coo.FromCsr(source, target, stream, quiet)); GUARD_CU(FromCoo(coo, target, stream, quiet)); GUARD_CU(coo.Release()); return retval; } /** * @brief Display CSC graph to console * * @param[in] with_edge_value Whether display graph with edge values. */ cudaError_t Display(std::string graph_prefix = "", SizeT nodes_to_show = 40, bool with_edge_values = true) { cudaError_t retval = cudaSuccess; if (nodes_to_show > this->nodes) nodes_to_show = this->nodes; util::PrintMsg(graph_prefix + "Graph containing " + std::to_string(this->nodes) + " vertices, " + std::to_string(this->edges) + " edges, in CSC format." 
+ " Neighbor list of first " + std::to_string(nodes_to_show) + " nodes :"); for (SizeT node = 0; node < nodes_to_show; node++) { std::string str = ""; for (SizeT edge = column_offsets[node]; edge < column_offsets[node + 1]; edge++) { if (edge - column_offsets[node] > 40) break; str = str + "[" + std::to_string(row_indices[edge]); if (with_edge_values && (FLAG & HAS_EDGE_VALUES)) { str = str + "," + std::to_string(edge_values[edge]); } if (edge - column_offsets[node] != 40 && edge != column_offsets[node + 1] - 1) str = str + "], "; else str = str + "]"; } if (column_offsets[node + 1] - column_offsets[node] > 40) str = str + "..."; str = str + " : v " + std::to_string(node) + " " + std::to_string(column_offsets[node]); util::PrintMsg(str); } return retval; } __device__ __host__ __forceinline__ SizeT GetNeighborListLength(const VertexT &v) const { if (util::lessThanZero(v) || v >= this->nodes) return 0; return _ldg(column_offsets + (v + 1)) - _ldg(column_offsets + v); } __device__ __host__ __forceinline__ SizeT GetNeighborListOffset(const VertexT &v) const { return _ldg(column_offsets + v); } __device__ __host__ __forceinline__ VertexT GetEdgeSrc(const SizeT &e) const { return util::BinarySearch_RightMost(e, column_offsets + 0, (SizeT)0, this->nodes); } __device__ __host__ __forceinline__ VertexT GetEdgeDest(const SizeT &e) const { // return _ldg(row_indices + e); return row_indices[e]; } __device__ __host__ __forceinline__ void GetEdgeSrcDest(const SizeT &e, VertexT &src, VertexT &dest) const { src = util::BinarySearch_RightMost(e, column_offsets + 0, (SizeT)0, this->nodes); dest = row_indices[e]; } }; // CSC template <typename VertexT, typename SizeT, typename ValueT, GraphFlag _FLAG, unsigned int cudaHostRegisterFlag> struct Csc<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag, false> { cudaError_t Release(util::Location target = util::LOCATION_ALL) { return cudaSuccess; } template <typename CooT_in> cudaError_t FromCoo(CooT_in &coo, util::Location target = 
util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } template <typename CsrT_in> cudaError_t FromCsr(CsrT_in &csr, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } template <typename CscT_in> cudaError_t FromCsc(CscT_in &csc, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } __host__ __device__ __forceinline__ SizeT GetNeighborListLength(const VertexT &v) const { return 0; } cudaError_t Move(util::Location source, util::Location target, cudaStream_t stream = 0) { return cudaSuccess; } cudaError_t Display(std::string graph_prefix = "", SizeT nodes_to_show = 40, bool with_edge_values = true) { return cudaSuccess; } }; } // namespace graph } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
the_stack
#include <torch/types.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> static __device__ __forceinline__ int floor_div(int a, int b) { int t = 1 - a / b; return (a + t * b) / b - t; } struct UpFirDn2DKernelParams { int up_x; int up_y; int down_x; int down_y; int pad_x0; int pad_x1; int pad_y0; int pad_y1; int flip; float gain; int major_dim; int in_h; int in_w; int minor_dim; int kernel_h; int kernel_w; int out_h; int out_w; int loop_major; int loop_x; }; template <typename scalar_t> __global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { int minor_idx = blockIdx.x * blockDim.x + threadIdx.x; int out_y = minor_idx / p.minor_dim; minor_idx -= out_y * p.minor_dim; int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y; int major_idx_base = blockIdx.z * p.loop_major; if (out_x_base >= p.out_w || out_y >= p.out_h || major_idx_base >= p.major_dim) { return; } int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0; int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h); int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y; int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y; if (p.flip) kernel_y = p.kernel_h - 1 - kernel_y; for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major && major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, out_x = out_x_base; loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) { int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0; int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w); int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x; int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x; if (p.flip) kernel_x = p.kernel_w - 1 - kernel_x; const scalar_t *x_p = &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * 
p.minor_dim + minor_idx]; const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x]; int x_px = p.minor_dim; int k_px = (p.flip) ? p.up_x : -p.up_x; int x_py = p.in_w * p.minor_dim; int k_py = ((p.flip) ? p.up_y : -p.up_y) * p.kernel_w; scalar_t v = 0.0f; for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p); x_p += x_px; k_p += k_px; } x_p += x_py - w * x_px; k_p += k_py - w * k_px; } v *= p.gain; out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w> __global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input, const scalar_t *kernel, const UpFirDn2DKernelParams p) { const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; __shared__ volatile float sk[kernel_h][kernel_w]; __shared__ volatile float sx[tile_in_h][tile_in_w]; int minor_idx = blockIdx.x; int tile_out_y = minor_idx / p.minor_dim; minor_idx -= tile_out_y * p.minor_dim; tile_out_y *= tile_out_h; int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; int major_idx_base = blockIdx.z * p.loop_major; if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) { return; } for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) { int ky = tap_idx / kernel_w; int kx = tap_idx - ky * kernel_w; scalar_t v = 0.0; if (kx < p.kernel_w & ky < p.kernel_h) { int kky = (p.flip) ? ky : p.kernel_h - 1 - ky; int kkx = (p.flip) ? 
kx : p.kernel_w - 1 - kx; v = kernel[kky * p.kernel_w + kkx]; } sk[ky][kx] = v; } for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) { for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) { int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; int tile_in_x = floor_div(tile_mid_x, up_x); int tile_in_y = floor_div(tile_mid_y, up_y); __syncthreads(); for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) { int rel_in_y = in_idx / tile_in_w; int rel_in_x = in_idx - rel_in_y * tile_in_w; int in_x = rel_in_x + tile_in_x; int in_y = rel_in_y + tile_in_y; scalar_t v = 0.0; if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx]; } sx[rel_in_y][rel_in_x] = v; } __syncthreads(); for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) { int rel_out_y = out_idx / tile_out_w; int rel_out_x = out_idx - rel_out_y * tile_out_w; int out_x = rel_out_x + tile_out_x; int out_y = rel_out_y + tile_out_y; int mid_x = tile_mid_x + rel_out_x * down_x; int mid_y = tile_mid_y + rel_out_y * down_y; int in_x = floor_div(mid_x, up_x); int in_y = floor_div(mid_y, up_y); int rel_in_x = in_x - tile_in_x; int rel_in_y = in_y - tile_in_y; int kernel_x = (in_x + 1) * up_x - mid_x - 1; int kernel_y = (in_y + 1) * up_y - mid_y - 1; if (out_x < p.out_w & out_y < p.out_h) { scalar_t v = 0.0; #pragma unroll for (int y = 0; y < kernel_h / up_y; y++) #pragma unroll for (int x = 0; x < kernel_w / up_x; x++) v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x]; v *= p.gain; out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v; } } } } } torch::Tensor upfirdn2d_op(const 
torch::Tensor &input, const torch::Tensor &kernel, int up_x, int up_y, int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0, int pad_y1, bool flip, float gain) { int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); UpFirDn2DKernelParams p; auto x = input.contiguous(); auto k = kernel.contiguous(); p.major_dim = x.size(0); p.in_h = x.size(1); p.in_w = x.size(2); p.minor_dim = x.size(3); p.kernel_h = k.size(0); p.kernel_w = k.size(1); p.up_x = up_x; p.up_y = up_y; p.down_x = down_x; p.down_y = down_y; p.pad_x0 = pad_x0; p.pad_x1 = pad_x1; p.pad_y0 = pad_y0; p.pad_y1 = pad_y1; p.flip = (flip) ? 1 : 0; p.gain = gain; p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y; p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x; auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); int mode = -1; int tile_out_h = -1; int tile_out_w = -1; AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { void *cuda_kernel = (void *)upfirdn2d_kernel_large<scalar_t>; if (p.up_x == 2 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 1 && p.kernel_w <= 24) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 2, 1, 1, 1, 1, 24, 8, 128>; tile_out_h = 8; tile_out_w = 128; } if (p.up_x == 2 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 1 && p.kernel_w <= 12) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 2, 1, 1, 1, 1, 12, 8, 128>; tile_out_h = 8; tile_out_w = 128; } if (p.up_x == 1 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 24 && p.kernel_w <= 1) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 2, 1, 1, 24, 1, 32, 32>; tile_out_h = 32; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 12 && p.kernel_w <= 1) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 2, 1, 1, 12, 1, 32, 32>; tile_out_h = 32; tile_out_w = 
32; } // if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 1 && p.kernel_h <= 1 && p.kernel_w <= 24) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 2, 1, 1, 24, 8, 64>; tile_out_h = 8; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 1 && p.kernel_h <= 1 && p.kernel_w <= 12) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 2, 1, 1, 12, 8, 64>; tile_out_h = 8; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 2 && p.kernel_h <= 24 && p.kernel_w <= 1) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 1, 2, 24, 1, 16, 32>; tile_out_h = 16; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 2 && p.kernel_h <= 12 && p.kernel_w <= 1) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 1, 2, 12, 1, 16, 32>; tile_out_h = 16; tile_out_w = 32; } // if (p.up_x == 4 && p.up_y == 4 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 24 && p.kernel_w <= 24) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 4, 4, 1, 1, 24, 24, 8, 32>; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 12 && p.kernel_w <= 12) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 12, 12, 8, 32>; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 4 && p.down_y == 4 && p.kernel_h <= 24 && p.kernel_w <= 24) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 4, 4, 24, 24, 8, 32>; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 12 && p.kernel_w <= 12) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 12, 12, 8, 32>; tile_out_h = 8; tile_out_w = 32; } // if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>; tile_out_h = 16; tile_out_w = 64; } if (p.up_x 
== 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>; tile_out_h = 16; tile_out_w = 64; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>; tile_out_h = 8; tile_out_w = 32; } if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) { cuda_kernel = (void *)upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>; tile_out_h = 8; tile_out_w = 32; } dim3 block_size; dim3 grid_size; if (tile_out_h > 0 && tile_out_w > 0) { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 1; block_size = dim3(32 * 8, 1, 1); grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, (p.out_w - 1) / (p.loop_x * tile_out_w) + 1, (p.major_dim - 1) / p.loop_major + 1); } else { p.loop_major = (p.major_dim - 1) / 16384 + 1; p.loop_x = 4; block_size = dim3(4, 32, 1); grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1, (p.out_w - 1) / (p.loop_x * block_size.y) + 1, (p.major_dim - 1) / p.loop_major + 1); } scalar_t *out_p = out.data_ptr<scalar_t>(); scalar_t *x_p = x.data_ptr<scalar_t>(); scalar_t *k_p = k.data_ptr<scalar_t>(); void *args[] = {&out_p, &x_p, &k_p, &p}; AT_CUDA_CHECK( cudaLaunchKernel(cuda_kernel, grid_size, block_size, args, 0, stream)); }); return out; }
the_stack
__device__ float ApplyDarkMatterToFrame( const float* beadParamCube, const float* regionFrameCube, const float darkness, const int frame, const int num_frames, const int frameStride, const int regionFrameStride) { if( !ConfigP.UseDarkMatterPCA() ) return ((*(regionFrameCube + (RfDarkMatter0 + ConstFlowP.getNucId())*regionFrameStride + frame)) *darkness); float val = 0; regionFrameCube += RfDarkMatter0*regionFrameStride + frame; //RfDarkMatter0 beadParamCube += BpPCAval0*frameStride; //BpPCAval0 val += (*regionFrameCube) * (*beadParamCube); regionFrameCube += regionFrameStride; //RfDarkMatter1 beadParamCube += frameStride; //BpPCAval1 val += (*regionFrameCube) * (*beadParamCube); regionFrameCube += regionFrameStride; //RfDarkMatter2 beadParamCube += frameStride; //BpPCAval2 val += (*regionFrameCube) * (*beadParamCube); regionFrameCube += regionFrameStride; //RfDarkMatter3 beadParamCube += frameStride; //BpPCAval3 val += (*regionFrameCube) * (*beadParamCube); return val; } // compute tmid muc. 
This routine mimics CPU routine in BookKeeping/RegionaParams.cpp __device__ float ComputeMidNucTime( const float tmidNuc, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP ) { float tmid = tmidNuc; tmid += perNucRegP->getTMidNucDelay()* (tmidNuc - ConstGlobalP.getValveOpen()) / ( ConstGlobalP.getMagicDivisorForTiming() + SAFETYZERO); tmid += perFlowRegP->getTMidNucShift(); return tmid; } __device__ float ComputeETBR( //const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float RatioDrift, const float R, float copies, const int realFlowNum ) { float etbR; if (ConfigP.FitTauE()) { etbR = R; if (etbR) etbR = perNucRegP->getNucModifyRatio() /(perNucRegP->getNucModifyRatio() + (1.0f - (RatioDrift * (realFlowNum)/SCALEOFBUFFERINGCHANGE))* (1.0f / etbR - 1.0f)); } else { if ( !ConfigP.UseAlternativeEtbRequation()) { etbR = R*perNucRegP->getNucModifyRatio() + (1.0f - R*perNucRegP->getNucModifyRatio())* RatioDrift*(realFlowNum)/SCALEOFBUFFERINGCHANGE; } else { etbR = R*perNucRegP->getNucModifyRatio() + RatioDrift*copies*(realFlowNum)/(6.0*SCALEOFBUFFERINGCHANGE); } } return etbR; } __device__ float ComputeTauB( const ConstantParamsRegion * constRegP, const float etbR) { float tauB; if (ConfigP.FitTauE()) { tauB = etbR ? 
(constRegP->getTauE() / etbR) : ConstGlobalP.getMinTauB(); } else { tauB = constRegP->getTauRM()*etbR + constRegP->getTauRO(); } clampT(tauB, ConstGlobalP.getMinTauB(), ConstGlobalP.getMaxTauB()); return tauB; } __device__ float ComputeSP( const float copyDrift, const float copies, const int realFlowNum ) { return ((float)(COPYMULTIPLIER * copies) * powf(copyDrift, (float)realFlowNum)); } __device__ float ComputeSigma( const PerFlowParamsRegion *perFlowRegP, const PerNucParamsRegion *perNucRegP) { return (perFlowRegP->getSigma() * perNucRegP->getSigmaMult()); } __device__ const float4* precompute_pois_LUT_params_SingelFLowFit (int il, int ir) { int n; if( il == 0 && ir == 0 ) n = 0; //special case for the packed case for 0 < A < 1 else n = il+1; //layout: poiss_cdf[ei][i], poiss_cdf[ei+1][i], poiss_cdf[ei][i+1], poiss_cdf[ei+1][i+1] const float4* ptr = POISS_APPROX_LUT_CUDA_BASE + n * MAX_POISSON_TABLE_ROW; return ptr; } __device__ float poiss_cdf_approx_float4_SingelFLowFit (float x, const float4* ptr, float occ_l, float occ_r) { float ret; x *= 20.0f; int left = (int) x; int max_dim_minus_one = MAX_POISSON_TABLE_ROW - 1; float idelta = x-left; if (left > max_dim_minus_one ){ left = max_dim_minus_one; } float ifrac = 1.0f-idelta; float4 mixLUT = LDG_ACCESS(ptr, left); ret = ( ifrac * ( occ_l * mixLUT.w + occ_r * mixLUT.z ) + idelta * (occ_l * mixLUT.y + occ_r * mixLUT.x )); return ret; } //Provide emphasis stride to projection search __device__ float ProjectionSearch( const ConstantParamsRegion * constRegP, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float* observedTrace, const float* emphasisVec, const int frames, const float* nucRise, const float* deltaFrames, const float kmult, const float d, const float tauB, const float gain, const float SP, float* tmp_fval, int nucStart, int beadFrameStride, int emphStride, int nucIntLoopSteps ) { float Ampl = 1.0f; for (int i=0; i<2; ++i) { //TODO invariant code motion? 
// ---------------------------------------------------------------------------
// Tail of an enclosing amplitude-estimation routine (its head lies before this
// chunk; presumably a per-well single-flow fit — TODO confirm against the full
// file): generate the model incorporation trace, then rescale the amplitude by
// projecting the observed trace onto the model trace with squared emphasis
// weights.
// ---------------------------------------------------------------------------
BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, nucRise, Ampl, kmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, nucIntLoopSteps * nucStart, tmp_fval, deltaFrames, nucIntLoopSteps, frames);
/*if (threadIdx.x == 0) {
  for (int i=0; i<frames; ++i) {
    printf("%f,", tmp_fval[i]);
  }
  printf("\n");
}
__syncthreads();*/
// Weighted least-squares projection: Ampl *= <model,obs>_w / <model,model>_w
// with per-frame weight w = emphasis^2.  den starts at a small positive value
// so the division below cannot be by exactly zero.
float num = 0, den = 0.0001f;
float emphasisVal;
for (int j=nucStart; j<frames; ++j) {
    emphasisVal = emphasisVec[j*emphStride] * emphasisVec[j*emphStride];
    num += tmp_fval[j]*observedTrace[j]*emphasisVal; // multiply by emphasis vectors
    den += tmp_fval[j]*tmp_fval[j]*emphasisVal;
}
Ampl *= (num/den);
// NaN guard: fall back to 1.0f, otherwise clamp to the valid Poisson-table range.
if (::isnan(Ampl))
    Ampl = 1.0f;
else
    clampT(Ampl, 0.001f, (float)LAST_POISSON_TABLE_COL);
}
return Ampl;
}

// ---------------------------------------------------------------------------
// BkgModelRedTraceCalculation
// Computes the modeled "red" hydrogen trace for one well/flow into fval[0..endFrames):
//  1. splits amplitude A into a two-point Poisson mixture (occ_l/occ_r),
//  2. numerically integrates nucleotide incorporation over nucIntLoopSteps
//     sub-steps per frame (Michaelis-Menten-style rate with kmax saturation),
//  3. applies the well buffering recurrence (time constant tau) and gain.
// Frames before startFrame are zeroed.  sens flips sign with A so a negative
// amplitude produces a negative trace.  c_dntp_top_ndx indexes nucRise at
// sub-step resolution and is advanced in place.
// ---------------------------------------------------------------------------
__device__ void BkgModelRedTraceCalculation( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, const int startFrame, const float * nucRise, float A, const float Krate, const float tau, const float gain, const float SP, const float d, float sens, int c_dntp_top_ndx, float * fval, const float* deltaFrame, const int nucIntLoopSteps, const int endFrames )
{
    if ( A!=A ) A=0.0001f; // safety check
    if (A < 0.0f) {
        A = -A;
        sens = -sens;
    } else if (A > LAST_POISSON_TABLE_COL)
        A = LAST_POISSON_TABLE_COL;
    if ( A<0.0001f ) A = 0.0001f; // safety

    // Split A into the two neighbouring integer homopolymer levels; the
    // fractional part interpolates between the Poisson LUT columns.
    int ileft = ( int ) A;
    float idelta = A-ileft;
    int iright = ileft+1;
    float ifrac = 1-idelta;
    ileft--;
    iright--;
    float occ_l = ifrac; // lower mixture
    float occ_r = idelta; // upper mixture
    if (ileft < 0) {
        occ_l = 0.0;
        ileft = 0;
    }
    if (iright == LAST_POISSON_TABLE_COL) {
        iright = ileft;
        occ_r = occ_l;
        occ_l = 0;
    }
    occ_l *= SP;
    occ_r *= SP;
    float pact = occ_l + occ_r;                      // active polymerase count
    const float4 * LUTptr = precompute_pois_LUT_params_SingelFLowFit (ileft, iright);
    float totocc = SP*A;                             // total occupancy (max hydrogen to generate)
    float totgen = totocc;                           // hydrogen still left to generate
    // We reuse this constant every loop...
    float cp_sid_kmax_nucid = perNucRegP->getKmax();
    float c_dntp_sum = 0.0;
    float c_dntp_old_rate = 0;
    float c_dntp_new_rate = 0;
    float scaled_kr = Krate*constRegP->getMoleculesToMicromolarConversion()/d;
    float half_kr = Krate*0.5f;
    // variables used for solving background signal shape
    float aval = 0.0f;
    //new Solve HydrogenFlowInWell
    float one_over_two_tauB = 1.0f;
    float one_over_one_plus_aval = 1.0f/ (1.0f+aval);
    float red_hydro_prev;
    float fval_local = 0.0f;
    float red_hydro;
    float c_dntp_bot_plus_kmax = 1.0f/cp_sid_kmax_nucid;
    bool start_frame = true;

    // zero out frames before nuc start
    for (int i=0; i<startFrame; ++i) {
        fval[i] = 0;
    }

    for (int i=startFrame;i < endFrames;i++) {
        if (totgen > 0.0f) {
            // Per-sub-step half-interval (trapezoidal integration of the rate).
            float ldt = (deltaFrame[i]/( nucIntLoopSteps * FRAMESPERSEC)) * half_kr;
            for (int st=1; (st <= nucIntLoopSteps) && (totgen > 0.0f);st++) {
                // assume instantaneous equilibrium
                c_dntp_old_rate = c_dntp_new_rate;
                // TODO Nucrise can be in shared memory...Need to fix it
                // All the threads should be grabbing from the same nucRise location.
                // c_dntp_bot is the concentration of dNTP in the well
                //float c_dntp_bot = LDG_ACCESS(nucRise, c_dntp_top_ndx++) / (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax);
                float c_dntp_bot = nucRise[c_dntp_top_ndx++] / (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax);
                c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid);
                c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax;
                float c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate); // trapezoid rule
                c_dntp_sum += c_dntp_int;
                /* if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 2)
                     printf ("%d top_ndx %d nucrise %f skr %f pact %f dntps+kmax %f\n", i, c_dntp_top_ndx-1, nucRise[c_dntp_top_ndx-1],scaled_kr, pact,c_dntp_bot_plus_kmax ); */
                // calculate new number of active polymerase
                float pact_new = poiss_cdf_approx_float4_SingelFLowFit(c_dntp_sum, LUTptr, occ_l, occ_r);
                /* if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 2)
                     printf ("%d pn: %f cds: %f LU: %f %f %f %f ocl: %f ocr: %f\n", i, pact_new, c_dntp_sum, (*LUTptr).x,(*LUTptr).y,(*LUTptr).z,(*LUTptr).w, occ_l, occ_r ); */
                totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int;
                pact = pact_new;
            }
            if (totgen < 0.0f) totgen = 0.0f;
            red_hydro = (totocc-totgen);
        }else{
            red_hydro = totocc;
        }
        /* if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 2)
             printf ("%d df: %f rh: %f\n", i, deltaFrame[i], red_hydro); */
        // calculate the 'background' part (the accumulation/decay of the protons in the well
        // normally accounted for by the background calc)
        red_hydro *= sens;
        one_over_two_tauB = 1.0f/ (2.0f*tau);
        aval = deltaFrame[i]*one_over_two_tauB; //CP_SINGLEFLOWFIT
        one_over_one_plus_aval = 1.0f/ (1.0f+aval);
        if(start_frame) { //CP_SINGLEFLOWFIT
            fval_local = red_hydro; // *one_over_one_plus_aval;
            start_frame = false;
        } else {
            fval_local = red_hydro - red_hydro_prev + (1.0f-aval)*fval_local; // *one_over_one_plus_aval;
        }
        red_hydro_prev = red_hydro;
        fval_local *= one_over_one_plus_aval;
        fval[i] = fval_local * gain;
    }
}

// ---------------------------------------------------------------------------
// IncorporationSignalCalculation
// Same incorporation integration as BkgModelRedTraceCalculation (same Poisson
// mixture and sub-step loop) but WITHOUT the buffering recurrence: each frame
// stores the raw generated-hydrogen value, fval[i] = red_hydro * sens.
// NOTE(review): the tau and gain parameters are unused in this variant; kept
// so the signature mirrors the Red version.
// ---------------------------------------------------------------------------
__device__ void IncorporationSignalCalculation( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, const int startFrame, const float * nucRise, float A, const float Krate, const float tau, const float gain, const float SP, const float d, float sens, int c_dntp_top_ndx, float * fval, const float* deltaFrame, const int nucIntLoopSteps, const int endFrames )
{
    if ( A!=A ) A=0.0001f; // safety check
    if (A < 0.0f) {
        A = -A;
        sens = -sens;
    } else if (A > LAST_POISSON_TABLE_COL)
        A = LAST_POISSON_TABLE_COL;
    if ( A<0.0001f ) A = 0.0001f; // safety

    // Two-point Poisson mixture setup (identical to BkgModelRedTraceCalculation).
    int ileft = ( int ) A;
    float idelta = A-ileft;
    int iright = ileft+1;
    float ifrac = 1-idelta;
    ileft--;
    iright--;
    float occ_l = ifrac; // lower mixture
    float occ_r = idelta; // upper mixture
    if (ileft < 0) {
        occ_l = 0.0;
        ileft = 0;
    }
    if (iright == LAST_POISSON_TABLE_COL) {
        iright = ileft;
        occ_r = occ_l;
        occ_l = 0;
    }
    occ_l *= SP;
    occ_r *= SP;
    float pact = occ_l + occ_r;
    const float4 * LUTptr = precompute_pois_LUT_params_SingelFLowFit (ileft, iright);
    float totocc = SP*A;
    float totgen = totocc;
    // We reuse this constant every loop...
    float cp_sid_kmax_nucid = perNucRegP->getKmax();
    float c_dntp_sum = 0.0;
    float c_dntp_old_rate = 0;
    float c_dntp_new_rate = 0;
    float scaled_kr = Krate*constRegP->getMoleculesToMicromolarConversion()/d;
    float half_kr = Krate*0.5f;
    float red_hydro;
    float c_dntp_bot_plus_kmax = 1.0f/cp_sid_kmax_nucid;

    // zero out frames before nuc start
    for (int i=0; i<startFrame; ++i) {
        fval[i] = 0;
    }

    for (int i=startFrame;i < endFrames;i++) {
        if (totgen > 0.0f) {
            float ldt = (deltaFrame[i]/( nucIntLoopSteps * FRAMESPERSEC)) * half_kr;
            for (int st=1; (st <= nucIntLoopSteps) && (totgen > 0.0f);st++) {
                // assume instantaneous equilibrium
                c_dntp_old_rate = c_dntp_new_rate;
                // TODO Nucrise can be in shared memory...Need to fix it
                // All the threads should be grabbing from the same nucRise location.
                // c_dntp_bot is the concentration of dNTP in the well
                //float c_dntp_bot = LDG_ACCESS(nucRise, c_dntp_top_ndx++) / (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax);
                float c_dntp_bot = nucRise[c_dntp_top_ndx++] / (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax);
                c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid);
                c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax;
                float c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate);
                c_dntp_sum += c_dntp_int;
                /* if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 2)
                     printf ("%d top_ndx %d nucrise %f skr %f pact %f dntps+kmax %f\n", i, c_dntp_top_ndx-1, nucRise[c_dntp_top_ndx-1],scaled_kr, pact,c_dntp_bot_plus_kmax ); */
                // calculate new number of active polymerase
                float pact_new = poiss_cdf_approx_float4_SingelFLowFit(c_dntp_sum, LUTptr, occ_l, occ_r);
                /* if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 2)
                     printf ("%d pn: %f cds: %f LU: %f %f %f %f ocl: %f ocr: %f\n", i, pact_new, c_dntp_sum, (*LUTptr).x,(*LUTptr).y,(*LUTptr).z,(*LUTptr).w, occ_l, occ_r ); */
                totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int;
                pact = pact_new;
            }
            if (totgen < 0.0f) totgen = 0.0f;
            red_hydro = (totocc-totgen);
        }else{
            red_hydro = totocc;
        }
        fval[i] = red_hydro * sens;
    }
}

// ---------------------------------------------------------------------------
// BkgModelPurpleTraceCalculation
// Converts a raw incorporation ("red") trace into the full "purple" well trace
// in place: applies the buffering recurrence against the background trace
// (etbR coupling), gain, and the per-frame dark-matter correction.  The first
// frame seeds the recurrence; subsequent frames use the previous purple value.
// `trace` is both input (red hydrogen) and output (purple trace).
// ---------------------------------------------------------------------------
__device__ void BkgModelPurpleTraceCalculation( const float *beadParamCube, const float *regionFrameCube, const float darkness, const float etbR, const float tauB, const float gain, const float *bkgTrace, const float* deltaFrame, const int num_frames, const int beadFrameStride, const int regionFrameStride, float *trace // both input and output
)
{
    float one_over_two_tauB = 1.0f / (2.0f * tauB);
    float one_over_one_plus_aval = 0.0f;
    int i=0;
    float xt = LDG_ACCESS(deltaFrame, i) * one_over_two_tauB; // decay term
    one_over_one_plus_aval = 1.0f/(1.0f+xt);
    float red_prev = trace[i];
    float purple_hdr = (red_prev + (etbR + xt)*LDG_ACCESS(bkgTrace,i)) * one_over_one_plus_aval;
    trace[i] = purple_hdr*gain + ApplyDarkMatterToFrame(beadParamCube, regionFrameCube, darkness, i, num_frames, beadFrameStride, regionFrameStride);
    i++;
    for (; i<num_frames; i++) {
        xt = LDG_ACCESS(deltaFrame, i) * one_over_two_tauB;
        one_over_one_plus_aval = 1.0f / (1.0f + xt);
        // Incremental recurrence: new red delta + background coupling + decayed
        // previous purple value, all normalized by (1 + xt).
        purple_hdr = ((trace[i] - red_prev) + (etbR + xt) * LDG_ACCESS(bkgTrace,i) - (etbR - xt) * LDG_ACCESS(bkgTrace , (i-1)) + (1.0f - xt) * purple_hdr) * one_over_one_plus_aval;
        red_prev = trace[i];
        trace[i] = purple_hdr*gain + ApplyDarkMatterToFrame(beadParamCube, regionFrameCube, darkness, i, num_frames, beadFrameStride, regionFrameStride);
    }
}

// ---------------------------------------------------------------------------
// ComputeModelBasedTrace
// Convenience wrapper: generates the incorporation signal into `trace`, then
// converts it in place to the full model-based (purple) trace.
// ---------------------------------------------------------------------------
__device__ void ComputeModelBasedTrace( const float *bkgTrace, const float* deltaFrame, const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, const float *beadParamCube, const float *regionFrameCube, const float *nucRise, const int startFrame, const float Krate, const float tauB, const float gain, const float SP, const float d, const float darkness, const float etbR, float sens, float A, int c_dntp_top_ndx, const int nucIntLoopSteps, const int num_frames, const int beadFrameStride, const int regionFrameStride, float *trace)
{
    IncorporationSignalCalculation( constRegP, perNucRegP, startFrame, nucRise, A, Krate, tauB, gain, SP, d, sens, c_dntp_top_ndx, trace, deltaFrame, nucIntLoopSteps, num_frames);
    BkgModelPurpleTraceCalculation( beadParamCube, regionFrameCube, darkness, etbR, tauB, gain, bkgTrace, deltaFrame, num_frames, beadFrameStride, regionFrameStride, trace);
}

// ---------------------------------------------------------------------------
// GenerateStratifiedEmphasis_Dev
// Emphasis value for one frame at homopolymer level hpNum: two Gaussian
// falloffs around tcenter whose centers/widths scale linearly with hpNum
// (coefficients from ConstGlobalP emphasis params), weighted by the number of
// raw frames averaged into this compressed frame.
// ---------------------------------------------------------------------------
__device__ float GenerateStratifiedEmphasis_Dev( const int hpNum, const int frame, const float tcenter, const int *framesPerPoint, const float *frameNumber)
{
    const float *emp = ConstGlobalP.getEmphParams();
    float na = emp[0] + hpNum*emp[1];
    float nb = emp[2] + hpNum*emp[3];
    float db = emp[4] + hpNum*emp[5];
    float deltat = frameNumber[frame] - tcenter;
    float EmphasisOffsetB = (deltat - nb) / ( ConstGlobalP.getEmphWidth()*db );
    float tmp = ConstGlobalP.getEmphAmpl() * expf(-EmphasisOffsetB*EmphasisOffsetB);
    float EmphasisOffsetA = (deltat - na) / ConstGlobalP.getEmphWidth();
    float empVal = framesPerPoint[frame];
    if ((EmphasisOffsetA < 0.0f) && (deltat >= -3.0f))
        empVal *= tmp;
    else if (EmphasisOffsetA >= 0.0f)
        empVal *= tmp * expf(-EmphasisOffsetA*EmphasisOffsetA);
    return empVal;
}

// ---------------------------------------------------------------------------
// GenerateBlankEmphasis_Dev
// Amplitude-independent emphasis: a single Gaussian around tcenter using
// emphasis params emp[6] (center offset) and emp[7] (width).
// ---------------------------------------------------------------------------
__device__ float GenerateBlankEmphasis_Dev( const int frame, const float tcenter, const int *framesPerPoint, const float *frameNumber)
{
    const float *emp = ConstGlobalP.getEmphParams();
    float deltat = frameNumber[frame] - tcenter;
    float EmphasisOffsetC = (deltat - emp[6]) / emp[7];
    return (framesPerPoint[frame] * expf(-EmphasisOffsetC*EmphasisOffsetC));
}

// generate emphasis vectors for all regions of a proton block
// need frames per point
// emphasis constants from gopt
//
// Launch layout (from the indexing below): one block per region
// (regId = blockIdx.x); dynamic shared memory must hold
// numEv * num_frames floats (smBuffer).  Per region it:
//  1. fills smBuffer with raw emphasis values (stratified or blank),
//  2. clamps negatives to zero and computes a per-hpNum normalization,
//  3. writes the scaled vectors to emphasisVec,
//  4. records the count of trailing frames above CENSOR_THRESHOLD.
// NOTE(review): steps 2 and 4 assume blockDim.x >= numEv, and the per-hpNum
// normalization divides by tmpScale without a zero guard — confirm callers
// guarantee a nonzero emphasis sum.
__global__ void GenerateEmphasis( const unsigned short * RegionMask, const int numEv, const float amult, const PerFlowParamsRegion *perFlowRegP, const int *framePerPoint, const float *RegionFrameCube, const size_t *numFramesRegion, float *emphasisVec, int *nonZeroEmphFrames)
{
    extern __shared__ float smBuffer[];
    __shared__ float empScale[MAX_POISSON_TABLE_COL];
    int regId = blockIdx.x;
    // Uniform per-block early exit (regId is the same for the whole block, so
    // this does not diverge around the __syncthreads() calls below).
    if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive)
        return;
    int num_frames = numFramesRegion[regId];
    const float *frameNumber = RegionFrameCube + RfFrameNumber*ConstFrmP.getMaxCompFrames()*ImgRegP.getNumRegions() + regId*ConstFrmP.getMaxCompFrames();
    // Offset all per-region pointers to this region's slice.
    perFlowRegP += regId;
    nonZeroEmphFrames += regId*numEv;
    emphasisVec += regId * numEv * ConstFrmP.getMaxCompFrames();
    framePerPoint += regId * ConstFrmP.getMaxCompFrames();
    float tmidNuc = perFlowRegP->getTMidNuc();
    int empVecSize = numEv * num_frames;
    // Step 1: raw emphasis values, one (hpNum, frame) pair per serializedFrame.
    for (int i=0; i<empVecSize; i+=blockDim.x) {
        int serializedFrame = i + threadIdx.x;
        int hpNum = serializedFrame / num_frames;
        if (hpNum < numEv) {
            int frameToCompute = serializedFrame - (hpNum*num_frames);
            // generate different emphasis based on amult*empAmpl
            if ((amult * ConstGlobalP.getEmphAmpl()) > 0.0f)
                smBuffer[serializedFrame] = GenerateStratifiedEmphasis_Dev( hpNum, frameToCompute, tmidNuc, framePerPoint, frameNumber);
            else
                smBuffer[serializedFrame] = GenerateBlankEmphasis_Dev( frameToCompute, tmidNuc, framePerPoint, frameNumber);
        }
    }
    __syncthreads();
    // Step 2: clamp negatives and compute per-hpNum scale = num_frames / sum.
    if (threadIdx.x < numEv) {
        float *myEmp = smBuffer + threadIdx.x * num_frames;
        float tmpScale = 0;
        for (int i=0; i<num_frames; ++i) {
            if (myEmp[i] < 0.0f )
                myEmp[i] = 0.0f;
            tmpScale += myEmp[i];
        }
        empScale[threadIdx.x] = (float)num_frames / tmpScale;
    }
    __syncthreads();
    // Step 3: write normalized emphasis vectors to global memory.
    for (int frm=0; frm<empVecSize; frm += blockDim.x) {
        int serializedFrame = frm + threadIdx.x;
        int hpNum = serializedFrame / num_frames;
        if (hpNum < numEv) {
            emphasisVec[serializedFrame] = smBuffer[serializedFrame] * empScale[hpNum];
        }
    }
    __syncthreads();
    // Step 4: count trailing censored (near-zero) frames per hpNum.
    if (threadIdx.x < numEv) {
        int zeroCnt = 0;
        float *myEmp = emphasisVec + threadIdx.x*num_frames;
        for (int i=num_frames-1; i>=0; i--) {
            if (myEmp[i] <= CENSOR_THRESHOLD)
                zeroCnt++;
            else
                break;
        }
        nonZeroEmphFrames[threadIdx.x] = num_frames - zeroCnt;
    }
    __syncthreads();
}

// ---------------------------------------------------------------------------
// instantSplineVal
// Smoothstep evaluation: 0 for scaled_dt <= 0, 3t^2 - 2t^3 on (0,1], clamped
// to 1 above 1.
// ---------------------------------------------------------------------------
__device__ float instantSplineVal(float scaled_dt)
{
    float last_nuc_value = 0.0f;
    if ((scaled_dt>0.0f))
    {
        float scaled_dt_square = scaled_dt*scaled_dt;
        last_nuc_value = scaled_dt_square*(3.0f-2.0f*scaled_dt); //spline! with zero tangents at start and end points
        if (scaled_dt>1.0f)
            last_nuc_value =1.0f;
    }
    return(last_nuc_value);
}

// ---------------------------------------------------------------------------
// CalculateNucRise
// Tabulates the nucleotide concentration rise at subSteps sub-steps per frame
// into nucRise (numf * subSteps entries): the difference of two smoothsteps
// centered at tmidnuc with width 3*sigma, scaled by the bulk concentration C.
// Returns the index of the first frame with a nonzero (positive scaled_dt)
// value, which callers use as the nuc-start frame.
// ---------------------------------------------------------------------------
__device__ int CalculateNucRise( const float tmidnuc, const float sigma, const float C, const float nuc_time_offset, const float *frame_times, const size_t numf, const int subSteps, float *nucRise )
{
    float tlast = 0.0f;
    float last_nuc_value = 0.0f;
    float my_inv_sigma = 1.0f/(3.0f*sigma); // bring back into range for ERF
    float scaled_dt = -1.0f;
    float scaled_end_dt = -1.0f;
    float scaled_end_nuc_time = nuc_time_offset*my_inv_sigma;
    float my_inv_sub_steps = 1.0f/((float)subSteps);
    bool start_uninitialized = true;
    int start = 0;
    for (int i=0; i < numf; i++)
    {
        // get the frame number of this data point (might be fractional because this point could be
        // the average of several frames of data. This number is the average time of all the averaged
        // data points
        float t=frame_times[i];
        for (int st=1;st <= subSteps;st++)
        {
            // Evenly spaced sub-step times between tlast and t.
            float tnew = tlast + (t - tlast) * (float)st*my_inv_sub_steps;
            scaled_dt = (tnew - tmidnuc) * my_inv_sigma + 0.5f;
            scaled_end_dt = scaled_dt - scaled_end_nuc_time;
            last_nuc_value = instantSplineVal(scaled_dt);
            last_nuc_value -= instantSplineVal(scaled_end_dt);
            last_nuc_value *= C;
            *nucRise = last_nuc_value;
            nucRise++;
        }
        // first time point where we have a nonzero time
        if (start_uninitialized && (scaled_dt>0.0f))
        {
            start = i;
            start_uninitialized=false;
        }
        tlast = t;
    }
    return start;
}
the_stack
#include <thrust/extrema.h>
#include <logger.h>

#undef DEBUG_CHEBYSHEV_OUTPUT

using namespace std;

namespace amgx
{
namespace chebyshev_poly_smoother
{

// Damped-Jacobi presmooth step for a zero initial guess: x = omega * b / d,
// with a safe divisor when the diagonal entry is (near) zero.
template <typename ValueTypeA, typename ValueTypeB>
struct jacobi_presmooth_functor
{
    ValueTypeB omega;
    jacobi_presmooth_functor( ValueTypeB omega ) : omega( omega ) {}
    __host__ __device__ ValueTypeB operator()( const ValueTypeB &b, const ValueTypeA &d ) const
    {
        return isNotCloseToZero(d) ? omega * b / d : omega * b / epsilon(d);
    }
};

// Damped-Jacobi update on a zipped (x, d, b, y) tuple:
// returns x + omega * (b - y) / d (safe divisor as above).
template <typename ValueTypeA, typename ValueTypeB>
struct jacobi_postsmooth_functor
{
    ValueTypeB omega;
    jacobi_postsmooth_functor( ValueTypeB omega ) : omega( omega ) {}
    template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const
    {
        ValueTypeB x = thrust::get<0>(t);
        ValueTypeA d = thrust::get<1>(t);
        ValueTypeB b = thrust::get<2>(t);
        ValueTypeB y = thrust::get<3>(t);
        // return x + omega * (b - y) / d.
        d = isNotCloseToZero(d) ? d : epsilon(d);
        d = ValueTypeA( 1 ) / d;
        b -= y;
        b *= omega;
        return b * d + x;
    }
};

// Elementwise addition, used with thrust::transform.
template <typename ValueTypeB>
struct add_functor
{
    __host__ __device__ ValueTypeB operator()( const ValueTypeB &x, const ValueTypeB &y )const { return x + y; }
};

// -----------------------------------
//  KERNELS
// -----------------------------------

// Chebyshev damping angle beta = pi / (4m + 2) for polynomial order m.
template <typename ValueTypeB>
__host__ __device__ ValueTypeB magicDampBeta(int m)
{
    return M_PI / (4 * (ValueTypeB)m + 2);
}

// n-th Chebyshev damping coefficient derived from beta (before division by
// the eigenvalue estimate in solver_setup).
template <typename ValueTypeB>
__host__ __device__ ValueTypeB magicDamp(int n, ValueTypeB beta)
{
    return cos(beta) * cos(beta) / (cos(beta * (2 * n + 1)) * cos(beta * (2 * n + 1)) - sin(beta) * sin(beta));
}

// Per-row absolute row sums (Gershgorin-style bound); each thread folds rows
// in a grid-stride loop and writes its running max to out[global thread id].
// When `diag` is set the (externally stored) diagonal entry is added too.
// NOTE(review): the final store uses the thread's global id, not a row id —
// threads in the tail block with id >= num_rows still write out[id].  The
// caller sizes `out` to A.get_num_rows(), so this looks like an out-of-bounds
// write unless num_rows is a multiple of the block size; confirm and either
// guard the store or size `out` to gridDim.x * blockDim.x.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int threads_per_block, int warps_per_block, bool diag>
__global__
void getLambdaEstimate(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, const int num_rows, ValueTypeB *out)
{
    int row_id = blockDim.x * blockIdx.x + threadIdx.x;
    ValueTypeB max_sum = (ValueTypeB)0.0;
    while (row_id < num_rows)
    {
        ValueTypeB cur_sum = (ValueTypeB)0.0;
        for (int j = row_offsets[row_id]; j < row_offsets[row_id + 1]; j++)
        {
            cur_sum += abs(values[j]);
        }
        if (diag)
        {
            cur_sum += abs(values[dia_indices[row_id]]);
        }
        max_sum = max(max_sum, cur_sum);
        row_id += gridDim.x * blockDim.x;
    }
    out[blockDim.x * blockIdx.x + threadIdx.x] = max_sum;
}

// --------------------------------
//  Methods
// --------------------------------

// Constructor: reads the polynomial order from config and clamps it to [1, 10].
template<class T_Config>
ChebyshevPolySolver_Base<T_Config>::ChebyshevPolySolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope)
{
    poly_order = cfg.AMG_Config::getParameter<int>("chebyshev_polynomial_order", cfg_scope);
    poly_order = min(10, max(poly_order, 1));
    tau.resize(poly_order);
}

// Destructor
template<class T_Config>
ChebyshevPolySolver_Base<T_Config>::~ChebyshevPolySolver_Base()
{
    this->tau.resize(0);
}

template<class T_Config>
void
ChebyshevPolySolver_Base<T_Config>::printSolverParameters() const
{
    std::cout << "chebyshev_polynomial_order= " << this->poly_order << std::endl;
}

// Estimates the largest eigenvalue of A as the maximum absolute row sum
// (device path): runs getLambdaEstimate per row, then reduces with
// thrust::max_element.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_eigenmax_estimate(const Matrix_d &A, ValueTypeB &lambda)
{
#define LAMBDA_BLOCK_SIZE 256
    VVector tsum(A.get_num_rows());
    const int threads_per_block = 256;
    const int blockrows_per_cta = threads_per_block;
    const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1);
    const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
    const IndexType *A_column_indices_ptr = A.col_indices.raw();
    const IndexType *A_dia_idx_ptr = A.diag.raw();
    const ValueTypeA *A_nonzero_values_ptr = A.values.raw();

    // Dispatch on whether the diagonal is stored separately (DIAG property).
    if (A.hasProps(DIAG))
    {
        cudaFuncSetCacheConfig(getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, true >, cudaFuncCachePreferL1);
        getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, true > <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, A.get_num_rows(), tsum.raw());
    }
    else
    {
        cudaFuncSetCacheConfig(getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, false >, cudaFuncCachePreferL1);
        getLambdaEstimate < IndexType, ValueTypeA, ValueTypeB, LAMBDA_BLOCK_SIZE, LAMBDA_BLOCK_SIZE / 32, false > <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, A.get_num_rows(), tsum.raw());
    }

    lambda = *(thrust::max_element(tsum.begin(), tsum.end()));
#ifdef DEBUG_CHEBYSHEV_OUTPUT
    printf("Lambda for A on row %lu is: %f\n", thrust::max_element(tsum.begin(), tsum.end()) - tsum.begin(), lambda);
#endif
}

// Host path is not implemented.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::compute_eigenmax_estimate(const Matrix_h &A, ValueTypeB &lambda)
{
    FatalError("compute_eigenmax_estimate is not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}

// Solver setup: estimates lambda_max and precomputes the poly_order Chebyshev
// step coefficients tau[i] = magicDamp(i, beta) / lambda.
template<class T_Config>
void
ChebyshevPolySolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
    Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(this->m_A);
    if (!A_as_matrix)
    {
        FatalError("ChebyshevPolySolver only works with explicit matrices", AMGX_ERR_INTERNAL);
    }
    ValueTypeB lambda = 0.0;
    compute_eigenmax_estimate( *A_as_matrix, lambda );
    ValueTypeB beta = magicDampBeta<ValueTypeB>(poly_order);
#ifdef DEBUG_CHEBYSHEV_OUTPUT
    int lvl = 0;
    A_as_matrix->getParameter("level", lvl);
    printf("Tau values for level %d : ", lvl);
#endif
    for (int i = 0; i < poly_order; i++)
    {
        tau[i] = magicDamp(i, beta) / lambda;
#ifdef DEBUG_CHEBYSHEV_OUTPUT
        printf("%f%s", tau[i], (i == poly_order - 1) ? "\n" : " ");
#endif
    }
}

//
template<class T_Config>
void
ChebyshevPolySolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}

// Solve one iteration: optionally overlaps the interior smooth with the halo
// exchange (latency hiding), then smooths the exterior view.
template<class T_Config>
bool
ChebyshevPolySolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
    Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A;
    if (xIsZero) { x.dirtybit = 0; }
    // Kick off the async halo exchange; wait immediately only if there is no
    // separate interior view to overlap with.
    if (!A_as_matrix->is_matrix_singleGPU())
    {
        A_as_matrix->manager->exchange_halo_async(x, x.tag);
        if (A_as_matrix->getViewExterior() == A_as_matrix->getViewInterior())
        {
            A_as_matrix->manager->exchange_halo_wait(x, x.tag);
        }
    }
    ViewType oldView = A_as_matrix->currentView();
    ViewType flags;
    bool latencyHiding = true;
    if (A_as_matrix->is_matrix_singleGPU() || (x.dirtybit == 0))
    {
        latencyHiding = false;
        A_as_matrix->setViewExterior();
        flags = (ViewType)(A_as_matrix->getViewInterior() | A_as_matrix->getViewExterior());
    }
    else
    {
        flags = A_as_matrix->getViewInterior();
        A_as_matrix->setViewInterior();
    }
    if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1)
    {
        smooth_1x1(*A_as_matrix, b, x, flags);
    }
    else
    {
        FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    // Second pass on the exterior-only view once the halo has arrived.
    if (latencyHiding)
    {
        A_as_matrix->manager->exchange_halo_wait(x, x.tag);
        A_as_matrix->setViewExterior();
        flags = (ViewType)(~(A_as_matrix->getViewInterior()) & A_as_matrix->getViewExterior());
        if (flags != 0)
        {
            if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1)
            {
                smooth_1x1(*A_as_matrix, b, x, flags);
            }
            else
            {
                FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
            }
        }
    }
    x.dirtybit = 1;
    A_as_matrix->setView(oldView);
    return this->converged( b, x );
}

template<class T_Config>
void
ChebyshevPolySolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{}

// Host path is not implemented.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_h &A, VVector &b, VVector &x, ViewType separation_flags)
{
    FatalError("chebyshev poly smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED);
}

// Chebyshev update on a zipped (x, b, y) tuple: returns x + tau * (b - y).
template <typename ValueTypeB>
struct chebyshev_poly_functor
{
    ValueTypeB tau;
    chebyshev_poly_functor( ValueTypeB tau ) : tau( tau ) {}
    template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const
    {
        ValueTypeB x = thrust::get<0>(t);
        ValueTypeB b = thrust::get<1>(t);
        ValueTypeB y = thrust::get<2>(t);
        return x + tau * (b - y);
    }
};

// Device 1x1 smoother: poly_order Chebyshev steps of
//   y = A*x;  x += tau[i] * (b - y)
// restricted to the rows selected by separation_flags.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags)
{
    // Lazily (re)allocate the scratch vector y to match b.
    if (this->y.size() != b.size())
    {
        this->y.resize(b.size());
        this->y.tag = this->tag * 100 + 3;
        this->y.set_block_dimx(b.get_block_dimx());
        this->y.set_block_dimy(b.get_block_dimy());
    }
    int num_rows = A.get_num_rows();
    int offset = 0;
    A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows);
    // NOTE(review): latencyHiding is computed but never read below, and the
    // end iterators use `begin() + num_rows` where num_rows was just
    // overwritten with the view SIZE — for a view with offset > 0 the end
    // looks like it should be `begin() + offset + num_rows`; confirm against
    // getOffsetAndSizeForView's contract.
    bool latencyHiding = separation_flags != A.getViewIntExt();// we change view only when do latency hiding, maybe it's better to use some explicit flag attached to matrix?
    for (int i = 0; i < this->poly_order; i++)
    {
        this->y.dirtybit = 0;
        //y = A*x
        multiply( A, x, this->y, separation_flags );
        //x += tau_i * (b - y)
        thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( x.begin() + offset, b.begin() + offset, this->y.begin() + offset)),
                           thrust::make_zip_iterator(thrust::make_tuple( x.begin() + num_rows, b.begin() + num_rows, this->y.begin() + num_rows)),
                           x.begin() + offset,
                           chebyshev_poly_functor<ValueTypeB>( this->tau[i] ));
        //Cublas::axpy(num_rows, (ValueTypeB)(this->tau[i]), b.raw(), 1, x.raw(), 1);
        //Cublas::axpy(num_rows, (ValueTypeB)(-1.0*this->tau[i]), this->y.raw(), 1, x.raw(), 1);
    }
}

// Host path is not implemented.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags)
{
    FatalError("chebyshev poly smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED);
}

// NOTE(review): intentionally(?) a no-op on device — x is left untouched for a
// zero initial guess; confirm callers rely on solve_iteration instead.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags)
{
}

// Block (BxB) smoothing is not implemented on either backend.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_h &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags)
{
    FatalError("M*M chebyshev poly smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED);
}

template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ChebyshevPolySolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_d &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags)
{
    FatalError("M*M chebyshev poly smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED);
}

/****************************************
 * Explict instantiations
 ***************************************/
#define AMGX_CASE_LINE(CASE) template class ChebyshevPolySolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

#define AMGX_CASE_LINE(CASE) template class ChebyshevPolySolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

} // namespace chebyshev_poly_smoother
} // namespace amgx
the_stack
#include <iostream>
#include <iterator>
#include <type_traits>
#include <cassert>
#include <cstdint>

#include <gdf/gdf.h>

#include <thrust/pair.h>

#include "managed_allocator.cuh"
#include "managed.cuh"
#include "hash_functions.cuh"

// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL( call ) 									   \
{                                                                  \
    cudaError_t cudaStatus = call;                                 \
    if ( cudaSuccess != cudaStatus ) {                             \
        fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
                        #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus);    \
        exit(1);										           \
    }												               \
}
#endif

// Byte-granular compare-and-swap emulated with the native 32-bit atomicCAS.
//
// Fix over the previous version: the old code CAS'ed the whole enclosing
// 32-bit word against `compare`/`val` positioned in one byte with the other
// three bytes forced to zero, so (a) the swap only succeeded when the
// neighbouring bytes happened to be zero, (b) a success zeroed those
// neighbouring bytes, and (c) the return value was the low byte of the old
// word rather than the byte actually addressed.  This version loops on the
// aligned word, splicing only the addressed byte in and out, so neighbouring
// bytes are preserved and the comparison involves only the addressed byte.
//
// Returns the previous value of *address (standard CAS contract); the swap
// happened iff the returned value equals `compare`.
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare, int8_t val)
{
    // Word-aligned base containing our byte, and the byte's bit offset in it.
    int32_t *base_address = (int32_t*)((char*)address - ((size_t)address & 3));
    const unsigned int shift = ((size_t)address & 3) * 8;
    const int32_t mask = (int32_t)(0xffu << shift);
    int32_t old_word = *base_address;
    int32_t assumed;
    do {
        assumed = old_word;
        // If the addressed byte no longer matches `compare`, fail without writing.
        if ((int8_t)((assumed >> shift) & 0xff) != compare) break;
        const int32_t new_word = (assumed & ~mask) | ((int32_t)((unsigned char)val) << shift);
        old_word = atomicCAS(base_address, assumed, new_word);
    } while (assumed != old_word); // retry if a neighbouring byte changed concurrently
    return (int8_t)((old_word >> shift) & 0xff);
}

// TODO: can we do this more efficiently?
// 16-bit CAS emulated on the enclosing aligned 32-bit word.
// NOTE(review): like the original 8-bit variant above, this compares/stores
// the full word with the other half forced to zero, so it only succeeds when
// the neighbouring 16 bits are zero, clobbers them on success, and returns
// the low half of the word rather than the addressed half — confirm callers
// only ever use it on words whose other half is zero, or rebuild it as a
// read-modify-CAS loop.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare, int16_t val)
{
    int32_t *base_address = (int32_t*)((char*)address - ((size_t)address & 2));
    int32_t int_val = (int32_t)val << (((size_t)address & 2) * 8);
    int32_t int_comp = (int32_t)compare << (((size_t)address & 2) * 8);
    return (int16_t)atomicCAS(base_address, int_comp, int_val);
}

// 64-bit CAS: direct reinterpret to the native unsigned long long overload
// (same width, two's-complement bit pattern is preserved).
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare, int64_t val)
{
    return (int64_t)atomicCAS((unsigned long long*)address, (unsigned long long)compare, (unsigned long long)val);
}

// 64-bit add: direct reinterpret to the native unsigned long long overload
// (wrapping two's-complement addition is identical for signed/unsigned).
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val)
{
    return (int64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}

// Loads a key/value pair with the widest vector load whose size matches
// pair_type exactly (16/8/4/2 bytes), falling back to a plain load.
// NOTE(review): the vector paths assume `ptr` meets the corresponding
// alignment (e.g. 16-byte for uint4) — confirm the table allocation
// guarantees this.
template<typename pair_type>
__forceinline__
__device__ pair_type load_pair_vectorized( const pair_type* __restrict__ const ptr )
{
    if ( sizeof(uint4) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            uint4       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0,0,0,0};
        converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
        return converter.pair_val;
    } else if ( sizeof(uint2) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            uint2       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0,0};
        converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
        return converter.pair_val;
    } else if ( sizeof(int) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            int         vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0};
        converter.vec_val = *reinterpret_cast<const int*>(ptr);
        return converter.pair_val;
    } else if ( sizeof(short) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            short       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0};
        converter.vec_val = *reinterpret_cast<const short*>(ptr);
        return converter.pair_val;
    } else {
        return *ptr;
    }
}

// Mirror of load_pair_vectorized: stores a pair with the widest exact-size
// vector store, falling back to a plain store.
template<typename pair_type>
__forceinline__
__device__ void store_pair_vectorized( pair_type* __restrict__ const ptr, const pair_type val )
{
    if ( sizeof(uint4) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            uint4       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0,0,0,0};
        converter.pair_val = val;
        *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
    } else if ( sizeof(uint2) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            uint2       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0,0};
        converter.pair_val = val;
        *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
    } else if ( sizeof(int) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            int         vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0};
        converter.pair_val = val;
        *reinterpret_cast<int*>(ptr) = converter.vec_val;
    } else if ( sizeof(short) == sizeof(pair_type) ) {
        union pair_type2vec_type
        {
            short       vec_val;
            pair_type   pair_val;
        };
        pair_type2vec_type converter = {0};
        converter.pair_val = val;
        *reinterpret_cast<short*>(ptr) = converter.vec_val;
    } else {
        *ptr = val;
    }
}

// Fills the hash table with (key_val, elem_val) sentinel pairs; one thread
// per slot, guarded against the grid overshooting n.
template<typename value_type, typename size_type, typename key_type, typename elem_type>
__global__ void init_hashtbl(
    value_type* __restrict__ const hashtbl_values,
    const size_type n,
    const key_type key_val,
    const elem_type elem_val)
{
    const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx < n )
    {
        store_pair_vectorized( hashtbl_values + idx, thrust::make_pair( key_val, elem_val ) );
    }
}

// Host/device-callable equality functor (std::equal_to is host-only here).
template <typename T>
struct equal_to
{
    using result_type = bool;
    using first_argument_type = T;
    using second_argument_type = T;
    __forceinline__ __host__ __device__ constexpr bool operator()(const first_argument_type &lhs, const second_argument_type &rhs) const
    {
        return lhs == rhs;
    }
};

// Iterator adapter that wraps around from `end` back to `begin` on increment,
// used to probe the hash table circularly from an arbitrary start slot.
// NOTE(review): several members look ill-formed if instantiated — the const
// operator++ overloads mutate m_current inside a const member function,
// operator++(int) is declared to return a reference but returns a local copy,
// and the const operator->() calls .operator->() on what is a raw pointer for
// the multimap's iterator type.  They compile only because templates are
// instantiated lazily; confirm which overloads are actually used.
template<typename Iterator>
class cycle_iterator_adapter {
public:
    using value_type = typename std::iterator_traits<Iterator>::value_type;
    using difference_type = typename std::iterator_traits<Iterator>::difference_type;
    using pointer = typename std::iterator_traits<Iterator>::pointer;
    using reference = typename std::iterator_traits<Iterator>::reference;
    using iterator_type = Iterator;

    cycle_iterator_adapter() = delete;

    __host__ __device__ explicit cycle_iterator_adapter( const iterator_type& begin, const iterator_type& end, const iterator_type& current )
        : m_begin( begin ), m_end( end ), m_current( current )
    {}

    __host__ __device__ cycle_iterator_adapter& operator++()
    {
        if ( m_end == (m_current+1) )
            m_current = m_begin;
        else
            ++m_current;
        return *this;
    }

    __host__ __device__ const cycle_iterator_adapter& operator++() const
    {
        if ( m_end == (m_current+1) )
            m_current = m_begin;
        else
            ++m_current;
        return *this;
    }

    __host__ __device__ cycle_iterator_adapter& operator++(int)
    {
        cycle_iterator_adapter<iterator_type> old( m_begin, m_end, m_current);
        if ( m_end == (m_current+1) )
            m_current = m_begin;
        else
            ++m_current;
        return old;
    }

    __host__ __device__ const cycle_iterator_adapter& operator++(int) const
    {
        cycle_iterator_adapter<iterator_type> old( m_begin, m_end, m_current);
        if ( m_end == (m_current+1) )
            m_current = m_begin;
        else
            ++m_current;
        return old;
    }

    // Equal only when position AND the wrap range match.
    __host__ __device__ bool equal(const cycle_iterator_adapter<iterator_type>& other) const
    {
        return m_current == other.m_current && m_begin == other.m_begin && m_end == other.m_end;
    }

    __host__ __device__ reference& operator*()
    {
        return *m_current;
    }

    __host__ __device__ const reference& operator*() const
    {
        return *m_current;
    }

    __host__ __device__ const pointer operator->() const
    {
        return m_current.operator->();
    }

    __host__ __device__ pointer operator->()
    {
        return m_current;
    }

private:
    iterator_type m_current;
    iterator_type m_begin;
    iterator_type m_end;
};

template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs, const cycle_iterator_adapter<T>& rhs)
{
    return lhs.equal(rhs);
}

template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs, const cycle_iterator_adapter<T>& rhs)
{
    return !lhs.equal(rhs);
}

/**
 * Does support concurrent insert, but not concurrent insert and probing.
 *
 * TODO:
 * - add constructor that takes pointer to hash_table to avoid allocations
 * - extend interface to accept streams
 */
// GPU hash multimap using open addressing with linear probing over a fixed
// table of thrust::pair<Key, Element> slots.  A slot holding the sentinel pair
// (unused_key, unused_element) is empty.  Inherits `managed`, so (presumably)
// the object itself lives in CUDA managed memory and is usable from both host
// and device -- confirm against the `managed` base declared elsewhere.
template <typename Key,
          typename Element,
          typename size_type,
          Key unused_key,
          Element unused_element,
          typename Hasher = default_hash<Key>,
          typename Equality = equal_to<Key>,
          typename Allocator = managed_allocator<thrust::pair<Key, Element> >,
          bool count_collisions = false>
class concurrent_unordered_multimap : public managed
{
public:
    using hasher = Hasher;
    using key_equal = Equality;
    using allocator_type = Allocator;
    using key_type = Key;
    using value_type = thrust::pair<Key, Element>;
    using mapped_type = Element;
    using iterator = cycle_iterator_adapter<value_type*>;
    using const_iterator = const cycle_iterator_adapter<value_type*>;

private:
    // Reinterprets one (key, element) pair as a single 64-bit word so that an
    // entire slot can be claimed with one atomicCAS.  Only meaningful when
    // sizeof(value_type) == sizeof(unsigned long long int) (checked at the
    // insert call site).
    union pair2longlong
    {
        unsigned long long int longlong;
        value_type pair;
    };

public:
    // Allocates a table of n slots, prefetches it to the current device when
    // the allocation is managed memory, then fills every slot with the unused
    // sentinel pair via the init_hashtbl kernel.  Construction is synchronous:
    // it waits on stream 0 before returning.
    explicit concurrent_unordered_multimap(size_type n,
                                           const Hasher& hf = hasher(),
                                           const Equality& eql = key_equal(),
                                           const allocator_type& a = allocator_type())
        : m_hf(hf), m_equal(eql), m_allocator(a), m_hashtbl_size(n), m_hashtbl_capacity(n), m_collisions(0)
    {
        m_hashtbl_values = m_allocator.allocate( m_hashtbl_capacity );
        constexpr int block_size = 128;
        {
            // Only prefetch when the pointer is actually managed memory;
            // otherwise cudaMemPrefetchAsync would fail.
            cudaPointerAttributes hashtbl_values_ptr_attributes;
            cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values );

            if ( cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged )
            {
                int dev_id = 0;
                CUDA_RT_CALL( cudaGetDevice( &dev_id ) );
                CUDA_RT_CALL( cudaMemPrefetchAsync(m_hashtbl_values, m_hashtbl_size*sizeof(value_type), dev_id, 0) );
            }
        }
        // ceil(m_hashtbl_size / block_size) blocks.
        init_hashtbl<<<((m_hashtbl_size-1)/block_size)+1,block_size>>>( m_hashtbl_values, m_hashtbl_size, unused_key, unused_element );
        CUDA_RT_CALL( cudaGetLastError() );
        CUDA_RT_CALL( cudaStreamSynchronize(0) );
    }

    ~concurrent_unordered_multimap()
    {
        m_allocator.deallocate( m_hashtbl_values, m_hashtbl_capacity );
    }

    // Iterators wrap the raw slot array with a cycling adapter; end() is the
    // adapter whose current position is one past the last slot.
    __host__ __device__ iterator begin()
    {
        return iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values );
    }
    __host__ __device__ const_iterator begin() const
    {
        return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values );
    }
    __host__ __device__ iterator end()
    {
        return iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values+m_hashtbl_size );
    }
    __host__ __device__ const_iterator end() const
    {
        return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,m_hashtbl_values+m_hashtbl_size );
    }

    // Sentinel key that marks an empty slot.
    __forceinline__
    static constexpr __host__ __device__ key_type get_unused_key()
    {
        return unused_key;
    }

    /* --------------------------------------------------------------------------*/
    /**
     * @Synopsis Inserts a (key, value) pair into the hash map
     *
     * @Param[in] x The (key, value) pair to insert
     * @Param[in] precomputed_hash A flag indicating whether or not a precomputed
     * hash value is passed in
     * @Param[in] precomputed_hash_value A precomputed hash value to use for determining
     * the write location of the key into the hash map instead of computing
     * the hash value directly from the key
     * @Param[in] keys_are_equal An optional functor for comparing if two keys are equal
     * @tparam hash_value_type The datatype of the hash value
     * @tparam comparison_type The type of the key comparison functor
     *
     * @Returns An iterator to the newly inserted (key, value) pair,
     * or end() when the table is full
     */
    /* ----------------------------------------------------------------------------*/
    template < typename hash_value_type = typename Hasher::result_type,
               typename comparison_type = key_equal>
    __forceinline__
    __device__ iterator insert(const value_type& x,
                               bool precomputed_hash = false,
                               hash_value_type precomputed_hash_value = 0,
                               comparison_type keys_are_equal = key_equal())
    {
        const size_type hashtbl_size = m_hashtbl_size;
        value_type* hashtbl_values = m_hashtbl_values;

        hash_value_type hash_value{0};

        // If a precomputed hash value has been passed in, then use it to determine
        // the write location of the new key
        if(true == precomputed_hash)
        {
            hash_value = precomputed_hash_value;
        }
        // Otherwise, compute the hash value from the new key
        else
        {
            hash_value = m_hf(x.first);
        }

        size_type hash_tbl_idx = hash_value % hashtbl_size;

        value_type* it = 0;

        size_type attempt_counter{0};

        // Linear probing: scan forward (with wrap-around) until an empty slot
        // is claimed or the whole table has been visited.
        while (0 == it) {
            value_type* tmp_it = hashtbl_values + hash_tbl_idx;

            // Fast path: when both halves are integers and the pair fits in
            // 64 bits, claim key and value together with a single atomicCAS
            // against the packed sentinel.
            if ( std::numeric_limits<key_type>::is_integer && std::numeric_limits<mapped_type>::is_integer &&
                 sizeof(unsigned long long int) == sizeof(value_type) )
            {
                pair2longlong converter = {0ull};
                converter.pair = thrust::make_pair( unused_key, unused_element );
                const unsigned long long int unused = converter.longlong;
                converter.pair = x;
                const unsigned long long int value = converter.longlong;
                const unsigned long long int old_val = atomicCAS( reinterpret_cast<unsigned long long int*>(tmp_it), unused, value );
                if ( old_val == unused ) {
                    it = tmp_it;
                }
                else if ( count_collisions )
                {
                    atomicAdd( &m_collisions, 1 );
                }
            } else {
                // Generic path: claim the key slot first, then write the value.
                // NOTE(review): the value store is not atomic with the key CAS,
                // so a concurrent probe can observe the key before its value is
                // written -- consistent with the documented restriction that
                // concurrent insert + probing is unsupported.
                const key_type old_key = atomicCAS( &(tmp_it->first), unused_key, x.first );
                if ( keys_are_equal( unused_key, old_key ) ) {
                    (m_hashtbl_values+hash_tbl_idx)->second = x.second;
                    it = tmp_it;
                }
                else if ( count_collisions )
                {
                    atomicAdd( &m_collisions, 1 );
                }
            }
            hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;

            attempt_counter++;
            // Visited every slot without finding a free one: give up.
            if( attempt_counter > hashtbl_size)
            {
                printf("Attempted to insert to multimap but the map is full!\n");
                return this->end();
            }
        }

        return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
    }

    /* --------------------------------------------------------------------------*/
    /**
     * @Synopsis Searches for a key in the hash map and returns an iterator to the first
     * instance of the key in the map.
     *
     * @Param[in] the_key The key to search for
     * @Param[in] precomputed_hash A flag indicating whether or not a precomputed
     * hash value is passed in
     * @Param[in] precomputed_hash_value A precomputed hash value to use for determining
     * the probe location of the key instead of computing the hash value
     * directly from the key
     * @Param[in] keys_are_equal An optional functor for comparing if two keys are equal
     * @tparam hash_value_type The datatype of the hash value
     * @tparam comparison_type The type of the key comparison functor
     *
     * @Returns An iterator to the first instance of the key in the map,
     * or end() when the key is absent
     */
    /* ----------------------------------------------------------------------------*/
    template < typename hash_value_type = typename Hasher::result_type,
               typename comparison_type = key_equal>
    __forceinline__
    __host__ __device__ const_iterator find(const key_type& the_key,
                                            bool precomputed_hash = false,
                                            hash_value_type precomputed_hash_value = 0,
                                            comparison_type keys_are_equal = key_equal()) const
    {
        hash_value_type hash_value{0};

        // If a precomputed hash value has been passed in, then use it to determine
        // the location of the key
        if(true == precomputed_hash)
        {
            hash_value = precomputed_hash_value;
        }
        // Otherwise, compute the hash value from the key
        else
        {
            hash_value = m_hf(the_key);
        }

        size_type hash_tbl_idx = hash_value % m_hashtbl_size;

        value_type* begin_ptr = 0;

        size_type counter = 0;
        // Probe forward until the key is found, or an empty (sentinel) slot
        // terminates the probe chain, or the whole table has been scanned.
        while ( 0 == begin_ptr ) {
            value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
            const key_type tmp_val = tmp_ptr->first;
            if ( keys_are_equal( the_key, tmp_val ) ) {
                begin_ptr = tmp_ptr;
                break;
            }
            if ( keys_are_equal( unused_key , tmp_val ) || (counter > m_hashtbl_size) ) {
                begin_ptr = m_hashtbl_values + m_hashtbl_size;
                break;
            }
            hash_tbl_idx = (hash_tbl_idx+1)%m_hashtbl_size;
            ++counter;
        }

        return const_iterator( m_hashtbl_values,m_hashtbl_values+m_hashtbl_size,begin_ptr);
    }

    // Makes this map a copy of `other` (async on `stream`), reallocating the
    // table only when `other` does not fit in the current capacity.
    gdf_error assign_async( const concurrent_unordered_multimap& other, cudaStream_t stream = 0 )
    {
        m_collisions = other.m_collisions;
        if ( other.m_hashtbl_size <= m_hashtbl_capacity ) {
            m_hashtbl_size = other.m_hashtbl_size;
        } else {
            m_allocator.deallocate( m_hashtbl_values, m_hashtbl_capacity );
            m_hashtbl_capacity = other.m_hashtbl_size;
            m_hashtbl_size = other.m_hashtbl_size;

            m_hashtbl_values = m_allocator.allocate( m_hashtbl_capacity );
        }
        CUDA_TRY( cudaMemcpyAsync( m_hashtbl_values, other.m_hashtbl_values, m_hashtbl_size*sizeof(value_type), cudaMemcpyDefault, stream ) );
        return GDF_SUCCESS;
    }

    // Re-initializes every slot to the sentinel pair on `stream`.
    // NOTE(review): m_collisions is reset on the host immediately, before the
    // clearing kernel has necessarily run -- confirm callers synchronize.
    void clear_async( cudaStream_t stream = 0 )
    {
        constexpr int block_size = 128;
        init_hashtbl<<<((m_hashtbl_size-1)/block_size)+1,block_size,0,stream>>>( m_hashtbl_values, m_hashtbl_size, unused_key, unused_element );
        if ( count_collisions )
            m_collisions = 0;
    }

    // Collision counter accumulated by insert() when count_collisions is true.
    unsigned long long get_num_collisions() const
    {
        return m_collisions;
    }

    // Debug helper: dumps every slot (including empty sentinel slots) to stdout.
    void print()
    {
        for (size_type i = 0; i < m_hashtbl_size; ++i)
        {
            std::cout<<i<<": "<<m_hashtbl_values[i].first<<","<<m_hashtbl_values[i].second<<std::endl;
        }
    }

    // Prefetches the table (when managed) and this object itself to `dev_id`.
    gdf_error prefetch( const int dev_id, cudaStream_t stream = 0 )
    {
        cudaPointerAttributes hashtbl_values_ptr_attributes;
        cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values );

        if ( cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged )
        {
            CUDA_TRY( cudaMemPrefetchAsync(m_hashtbl_values, m_hashtbl_size*sizeof(value_type), dev_id, stream) );
        }
        // The object is in managed memory via the `managed` base, so prefetch
        // it as well.
        CUDA_TRY( cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream) );

        return GDF_SUCCESS;
    }

private:
    const hasher m_hf;            // hash functor
    const key_equal m_equal;      // key equality functor (note: insert/find take their own)
    allocator_type m_allocator;   // slot allocator (managed memory by default)

    size_type m_hashtbl_size;     // number of live slots
    size_type m_hashtbl_capacity; // number of allocated slots (>= m_hashtbl_size)
    value_type* m_hashtbl_values; // the slot array

    unsigned long long m_collisions; // probe-collision counter (see count_collisions)
};
#endif //CONCURRENT_UNORDERED_MULTIMAP_CUH
the_stack
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>

#ifdef HAVE_CUDA

// Bilinear upsampling, NCHW layout.  One loop iteration per output pixel
// (hw = bdim2 * bdim3 output pixels); all nc = N*C channel planes are handled
// inside the per-pixel loop.  CUDA_1D_KERNEL_LOOP is presumably a grid-stride
// loop macro from ccv_nnc_compat.h -- confirm.
// Source coordinates use the half-pixel convention:
//   src = (dst + 0.5) * ratio - 0.5, with ratio = src_dim / dst_dim <= 1.
// NOTE(review): xs/ys are not clamped to >= 0, so for border output pixels the
// truncating (int) cast yields index 0 but a *negative* fractional weight
// (xsc1/ysc1 < 0), i.e. slight extrapolation instead of edge clamping --
// confirm this matches the CPU reference implementation.
template<typename NUM>
__global__ void _ccv_nnc_upsample_bilinear_forw_nchw(const int hw, const float rwidth, const float rheight, const int nc, const int adim2, const int ainc2, const int adim3, const int ainc3, const NUM* const a, const int binc2, const int bdim3, const int binc3, NUM* const b)
{
	CUDA_1D_KERNEL_LOOP(i, hw) {
		// Decompose the flat output-pixel index into (yd, xd).
		const int xd = i % bdim3;
		const int yd = i / bdim3;
		const NUM* ap = a;
		NUM* bp = b;
		// Horizontal source position and the two bracketing columns + weights.
		const float xs = (xd + 0.5) * rwidth - 0.5;
		const int xsi0 = (int)xs;
		const int xsi1 = ccv_min(xsi0 + 1, adim3 - 1);
		const float xsc1 = xs - xsi0;
		const float xsc0 = 1.0 - xsc1;
		// Vertical source position and the two bracketing rows + weights.
		const float ys = (yd + 0.5) * rheight - 0.5;
		const int ysi0 = (int)ys;
		const int ysi1 = ccv_min(ysi0 + 1, adim2 - 1);
		const float ysc1 = ys - ysi0;
		const float ysc0 = 1.0 - ysc1;
		// Blend the 2x2 source neighborhood for every channel plane.
		for (int j = 0; j < nc; j++)
		{
			bp[xd + yd * binc3] = (NUM)((float)ap[xsi0 + ysi0 * ainc3] * xsc0 * ysc0 + (float)ap[xsi1 + ysi0 * ainc3] * xsc1 * ysc0 + (float)ap[xsi0 + ysi1 * ainc3] * xsc0 * ysc1 + (float)ap[xsi1 + ysi1 * ainc3] * xsc1 * ysc1);
			ap += ainc2; // advance one source channel plane
			bp += binc2; // advance one destination channel plane
		}
	}
}

// Bilinear upsampling, NHWC layout.  One loop iteration per output (y, x)
// location; the inner loops cover the n batch items and c channels.
// Same half-pixel coordinate convention (and same unclamped-border caveat)
// as the NCHW kernel above.
template<typename NUM>
__global__ void _ccv_nnc_upsample_bilinear_forw_nhwc(const int hw, const float rwidth, const float rheight, const int n, const int c, const int adim1, const int ainc1, const int adim2, const int ainc2, const int ainc3, const NUM* const a, const int binc1, const int bdim2, const int binc2, const int binc3, NUM* const b)
{
	CUDA_1D_KERNEL_LOOP(i, hw) {
		const int xd = i % bdim2;
		const int yd = i / bdim2;
		const NUM* ap = a;
		NUM* bp = b;
		const float xs = (xd + 0.5) * rwidth - 0.5;
		const int xsi0 = (int)xs;
		const int xsi1 = ccv_min(xsi0 + 1, adim2 - 1);
		const float xsc1 = xs - xsi0;
		const float xsc0 = 1.0 - xsc1;
		const float ys = (yd + 0.5) * rheight - 0.5;
		const int ysi0 = (int)ys;
		const int ysi1 = ccv_min(ysi0 + 1, adim1 - 1);
		const float ysc1 = ys - ysi0;
		const float ysc0 = 1.0 - ysc1;
		for (int j = 0; j < n; j++)
		{
			// Channels are innermost in NHWC, so blend per channel.
			for (int k = 0; k < c; k++)
				bp[k + xd * binc3 + yd * binc2] = (NUM)((float)ap[k + xsi0 * ainc3 + ysi0 * ainc2] * xsc0 * ysc0 + (float)ap[k + xsi1 * ainc3 + ysi0 * ainc2] * xsc1 * ysc0 + (float)ap[k + xsi0 * ainc3 + ysi1 * ainc2] * xsc0 * ysc1 + (float)ap[k + xsi1 * ainc3 + ysi1 * ainc2] * xsc1 * ysc1);
			ap += ainc1; // advance one source batch item
			bp += binc1; // advance one destination batch item
		}
	}
}

// Fills `a` with zeros; used to clear the input gradient before the backward
// kernels scatter into it with atomicAdd.
template<typename NUM>
__global__ void _ccv_nnc_zero_back(const size_t tensor_count, NUM* const a)
{
	CUDA_1D_KERNEL_LOOP(i, tensor_count) {
		a[i] = 0;
	}
}

// Backward pass of bilinear upsampling, NCHW layout.  One loop iteration per
// element of the output gradient b; each scatters its value into the 4 source
// positions it was interpolated from, weighted by the same bilinear
// coefficients as the forward pass.  atomicAdd is required because adjacent
// output pixels share source pixels.  `a` must be zeroed beforehand
// (see _ccv_nnc_zero_back).
template<typename NUM>
__global__ void _ccv_nnc_upsample_bilinear_back_nchw(const size_t tensor_count, const float rwidth, const float rheight, const int adim2, const int ainc2, const int adim3, const int ainc3, NUM* const a, const int bdim2, const int binc2, const int bdim3, const int binc3, const NUM* const b)
{
	CUDA_1D_KERNEL_LOOP(i, tensor_count) {
		// Decompose the flat index into (plane idx, yd, xd).
		const int xd = i % bdim3;
		const int idxyd = i / bdim3;
		const int yd = idxyd % bdim2;
		const int idx = idxyd / bdim2;
		NUM* const ap = a + idx * ainc2;
		const NUM* const bp = b + idx * binc2;
		const float xs = (xd + 0.5) * rwidth - 0.5;
		const int xsi0 = (int)xs;
		const int xsi1 = ccv_min(xsi0 + 1, adim3 - 1);
		const float xsc1 = xs - xsi0;
		const float xsc0 = 1.0 - xsc1;
		const float ys = (yd + 0.5) * rheight - 0.5;
		const int ysi0 = (int)ys;
		const int ysi1 = ccv_min(ysi0 + 1, adim2 - 1);
		const float ysc1 = ys - ysi0;
		const float ysc0 = 1.0 - ysc1;
		// Read through the read-only cache; bp is never written by this kernel.
		const float bpi = (float)__ldg(bp + xd + yd * binc3);
		atomicAdd(&ap[xsi0 + ysi0 * ainc3], (NUM)(bpi * xsc0 * ysc0));
		atomicAdd(&ap[xsi1 + ysi0 * ainc3], (NUM)(bpi * xsc1 * ysc0));
		atomicAdd(&ap[xsi0 + ysi1 * ainc3], (NUM)(bpi * xsc0 * ysc1));
		atomicAdd(&ap[xsi1 + ysi1 * ainc3], (NUM)(bpi * xsc1 * ysc1));
	}
}

// Backward pass of bilinear upsampling, NHWC layout.  One loop iteration per
// output-gradient (batch, y, x) location; the inner loop scatters all ch
// channels.  Same zero-first / atomicAdd contract as the NCHW variant.
template<typename NUM>
__global__ void _ccv_nnc_upsample_bilinear_back_nhwc(const size_t tensor_count, const float rwidth, const float rheight, const int ch, const int adim1, const int ainc1, const int adim2, const int ainc2, const int ainc3, NUM* const a, const int bdim1, const int binc1, const int bdim2, const int binc2, const int binc3, const NUM* const b)
{
	CUDA_1D_KERNEL_LOOP(i, tensor_count) {
		const int xd = i % bdim2;
		const int idxyd = i / bdim2;
		const int yd = idxyd % bdim1;
		const int idx = idxyd / bdim1;
		NUM* const ap = a + idx * ainc1;
		const NUM* const bp = b + idx * binc1;
		const float xs = (xd + 0.5) * rwidth - 0.5;
		const int xsi0 = (int)xs;
		const int xsi1 = ccv_min(xsi0 + 1, adim2 - 1);
		const float xsc1 = xs - xsi0;
		const float xsc0 = 1.0 - xsc1;
		const float ys = (yd + 0.5) * rheight - 0.5;
		const int ysi0 = (int)ys;
		const int ysi1 = ccv_min(ysi0 + 1, adim1 - 1);
		const float ysc1 = ys - ysi0;
		const float ysc0 = 1.0 - ysc1;
		for (int c = 0; c < ch; c++)
		{
			const float bpi = (float)__ldg(bp + c + xd * binc3 + yd * binc2);
			atomicAdd(&ap[c + xsi0 * ainc3 + ysi0 * ainc2], (NUM)(bpi * xsc0 * ysc0));
			atomicAdd(&ap[c + xsi1 * ainc3 + ysi0 * ainc2], (NUM)(bpi * xsc1 * ysc0));
			atomicAdd(&ap[c + xsi0 * ainc3 + ysi1 * ainc2], (NUM)(bpi * xsc0 * ysc1));
			atomicAdd(&ap[c + xsi1 * ainc3 + ysi1 * ainc2], (NUM)(bpi * xsc1 * ysc1));
		}
	}
}

// Command entry point: forward bilinear upsampling of inputs[0] into
// outputs[0] on the context's stream.  Supports NCHW and NHWC (CHWN is
// asserted into the NHWC path), CCV_32F only, and upsampling only
// (the ratio asserts require src dims <= dst dims).
static int _ccv_nnc_upsample_bilinear_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	assert(input_size >= 1);
	assert(output_size >= 1);
	cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
	ccv_nnc_tensor_view_t* const a = (ccv_nnc_tensor_view_t*)inputs[0];
	ccv_nnc_tensor_view_t* const b = (ccv_nnc_tensor_view_t*)outputs[0];
	assert(ccv_nnc_tensor_nd(a->info.dim) <= CCV_NNC_MAX_DIM + 2);
	assert(ccv_nnc_tensor_nd(b->info.dim) <= CCV_NNC_MAX_DIM + 2);
	// Assuming this is float 32.
	int adim[CCV_NNC_MAX_DIM_ALLOC];
	int bdim[CCV_NNC_MAX_DIM_ALLOC];
	ccv_nnc_tensor_view_get_dim(a, adim);
	ccv_nnc_tensor_view_get_dim(b, bdim);
	// "inc" arrays carry the (possibly strided) view increments per axis.
	int ainc[CCV_NNC_MAX_DIM_ALLOC];
	int binc[CCV_NNC_MAX_DIM_ALLOC];
	assert(CCV_NNC_MAX_DIM == 2); // Need to change this logic for CCV_NNC_MAX_DIM == other number.
	ccv_nnc_tensor_view_get_inc(a, ainc);
	ccv_nnc_tensor_view_get_inc(b, binc);
	assert(a->info.format == b->info.format);
	assert(a->info.datatype == b->info.datatype);
	assert(a->info.datatype == CCV_32F);
	if (a->info.format == CCV_TENSOR_FORMAT_NCHW)
	{
		// N and C must match; only H and W are resampled.
		assert(adim[0] == bdim[0]);
		assert(adim[1] == bdim[1]);
		const int hw = bdim[2] * bdim[3];
		const float rheight = (float)adim[2] / bdim[2];
		const float rwidth = (float)adim[3] / bdim[3];
		assert(rheight <= 1);
		assert(rwidth <= 1);
		_ccv_nnc_upsample_bilinear_forw_nchw<<<CUDA_GET_BLOCKS(hw), CUDA_NUM_THREADS, 0, stream>>>(hw, rwidth, rheight, adim[0] * adim[1], adim[2], ainc[2] * ainc[3], adim[3], ainc[3], a->data.f32, binc[2] * binc[3], bdim[3], binc[3], b->data.f32);
	} else {
		assert(a->info.format == CCV_TENSOR_FORMAT_NHWC || a->info.format == CCV_TENSOR_FORMAT_CHWN);
		const float rheight = (float)adim[1] / bdim[1];
		const float rwidth = (float)adim[2] / bdim[2];
		assert(rheight <= 1);
		assert(rwidth <= 1);
		const int hw = bdim[1] * bdim[2];
		_ccv_nnc_upsample_bilinear_forw_nhwc<<<CUDA_GET_BLOCKS(hw), CUDA_NUM_THREADS, 0, stream>>>(hw, rwidth, rheight, adim[0], adim[3], adim[1], ainc[1] * ainc[2] * ainc[3], adim[2], ainc[2] * ainc[3], ainc[3], a->data.f32, binc[1] * binc[2] * binc[3], bdim[2], binc[2] * binc[3], binc[3], b->data.f32);
	}
	return CCV_NNC_EXEC_SUCCESS;
}

// Command entry point: backward pass.  outputs[0] receives the gradient
// w.r.t. the (smaller) input; inputs[0] is the incoming output gradient.
// The destination gradient is zeroed first, then accumulated into with
// atomicAdd by the scatter kernels.
static int _ccv_nnc_upsample_bilinear_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
	assert(input_size >= 1);
	assert(output_size >= 1);
	cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
	// Note the role reversal vs. forward: `a` is the output (input gradient),
	// `b` is the input (output gradient).
	ccv_nnc_tensor_view_t* const a = (ccv_nnc_tensor_view_t*)outputs[0];
	ccv_nnc_tensor_view_t* const b = (ccv_nnc_tensor_view_t*)inputs[0];
	assert(ccv_nnc_tensor_nd(a->info.dim) <= CCV_NNC_MAX_DIM + 2);
	assert(ccv_nnc_tensor_nd(b->info.dim) <= CCV_NNC_MAX_DIM + 2);
	// Assuming this is float 32.
	int adim[CCV_NNC_MAX_DIM_ALLOC];
	int bdim[CCV_NNC_MAX_DIM_ALLOC];
	ccv_nnc_tensor_view_get_dim(a, adim);
	ccv_nnc_tensor_view_get_dim(b, bdim);
	int ainc[CCV_NNC_MAX_DIM_ALLOC];
	int binc[CCV_NNC_MAX_DIM_ALLOC];
	assert(CCV_NNC_MAX_DIM == 2); // Need to change this logic for CCV_NNC_MAX_DIM == other number.
	ccv_nnc_tensor_view_get_inc(a, ainc);
	ccv_nnc_tensor_view_get_inc(b, binc);
	assert(a->info.format == b->info.format);
	assert(a->info.datatype == b->info.datatype);
	assert(a->info.datatype == CCV_32F);
	// Clear the gradient buffer before the atomicAdd scatter.
	const size_t a_tensor_count = ccv_nnc_tensor_count(a->info);
	_ccv_nnc_zero_back<<<CUDA_GET_BLOCKS(a_tensor_count), CUDA_NUM_THREADS, 0, stream>>>(a_tensor_count, a->data.f32);
	if (a->info.format == CCV_TENSOR_FORMAT_NCHW)
	{
		// One scatter thread per element of b.
		const size_t tensor_count = ccv_nnc_tensor_count(b->info);
		const float rheight = (float)adim[2] / bdim[2];
		const float rwidth = (float)adim[3] / bdim[3];
		assert(rheight <= 1);
		assert(rwidth <= 1);
		_ccv_nnc_upsample_bilinear_back_nchw<<<CUDA_GET_BLOCKS(tensor_count), CUDA_NUM_THREADS, 0, stream>>>(tensor_count, rwidth, rheight, adim[2], ainc[2] * ainc[3], adim[3], ainc[3], a->data.f32, bdim[2], binc[2] * binc[3], bdim[3], binc[3], b->data.f32);
	} else {
		assert(a->info.format == CCV_TENSOR_FORMAT_NHWC || a->info.format == CCV_TENSOR_FORMAT_CHWN);
		const float rheight = (float)adim[1] / bdim[1];
		const float rwidth = (float)adim[2] / bdim[2];
		assert(rheight <= 1);
		assert(rwidth <= 1);
		// One scatter thread per (batch, y, x) of b; channels handled in-kernel
		// (adim[3] is the channel count, presumably equal to bdim[3]).
		const size_t tensor_count = ccv_nnc_tensor_count(b->info) / adim[3];
		_ccv_nnc_upsample_bilinear_back_nhwc<<<CUDA_GET_BLOCKS(tensor_count), CUDA_NUM_THREADS, 0, stream>>>(tensor_count, rwidth, rheight, adim[3], adim[1], ainc[1] * ainc[2] * ainc[3], adim[2], ainc[2] * ainc[3], ainc[3], a->data.f32, bdim[1], binc[1] * binc[2] * binc[3], bdim[2], binc[2] * binc[3], binc[3], b->data.f32);
	}
	return CCV_NNC_EXEC_SUCCESS;
}

#endif

// Registers the forward GPU reference backend for bilinear upsampling.
REGISTER_COMMAND_BACKEND(CCV_NNC_UPSAMPLE_BILINEAR_FORWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
	registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
	registry->tensor_datatypes = CCV_32F; // Currently only support CCV_32F because atomicAdd only supports __half at sm_70. I will revisit this by either get rid of atomicAdd or deprecate support for Jetson Nano / TX2.
	registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_upsample_bilinear_forw;
#endif
}

// Registers the backward GPU reference backend for bilinear upsampling.
REGISTER_COMMAND_BACKEND(CCV_NNC_UPSAMPLE_BILINEAR_BACKWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
	registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
	registry->tensor_datatypes = CCV_32F;
	registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_upsample_bilinear_back;
#endif
}
the_stack
#define MOD_NAMESPACE #define MOD_NAMESPACE_NAME fmitest #define MOD_NAMESPACE_BEGIN namespace fmitest { #define MOD_NAMESPACE_END } //#define NVBIO_CUDA_DEBUG //#define NVBIO_CUDA_ASSERTS #include <nvbio/basic/omp.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/dna.h> #include <nvbio/basic/cached_iterator.h> #include <nvbio/basic/packedstream.h> #include <nvbio/basic/deinterleaved_iterator.h> #include <nvbio/fmindex/bwt.h> #include <nvbio/fmindex/ssa.h> #include <nvbio/fmindex/fmindex.h> #include <nvbio/fmindex/backtrack.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/io/fmindex/fmindex.h> using namespace nvbio; struct ssa_nop {}; namespace { // anonymous namespace template <uint32 OCC_INTERVAL,typename FMIndexType, typename word_type> __global__ void locate_kernel( const uint32 n_queries, const uint32 QUERY_LEN, const uint32 genome_length, const word_type* genome_stream, const FMIndexType fmi, const uint32* input, uint32* output) { typedef typename FMIndexType::index_type index_type; typedef typename FMIndexType::range_type range_type; const uint32 thread_id = threadIdx.x + blockIdx.x*blockDim.x; if (thread_id >= n_queries) return; typedef const_cached_iterator<const word_type*> cached_stream_type; typedef PackedStream<cached_stream_type,uint8,2,true,index_type> genome_stream_type; const cached_stream_type cached_genome_stream( genome_stream ); const genome_stream_type genome( cached_genome_stream ); const range_type range = match( fmi, genome + input[ thread_id ], QUERY_LEN ); output[ thread_id ] = uint32( locate( fmi, range.x ) ); } // test the gpu SSA against the cpu one template <typename SSA_device, typename SSA_host> void test_ssa( const SSA_device& ssa_device, const SSA_host& ssa) { thrust::host_vector<typename SSA_device::value_type> d_ssa = ssa_device.m_ssa; for (uint32 i = 0; i < d_ssa.size(); ++i) { if (d_ssa[i] != 
ssa.m_ssa[i]) { fprintf(stderr, " \nerror : expected SSA[%u] = %u, got: %u\n", i, (uint32)ssa.m_ssa[i], (uint32)d_ssa[i]); exit(1); } } } template <typename index_type> struct HostData { uint32 primary; thrust::host_vector<index_type> text; thrust::host_vector<index_type> bwt; thrust::host_vector<index_type> occ; thrust::host_vector<index_type> bwt_occ; thrust::host_vector<index_type> L2; thrust::host_vector<uint32> count_table; thrust::host_vector<uint32> input; thrust::host_vector<uint32> output; }; template <typename index_type> struct DeviceData { uint32 primary; thrust::device_vector<index_type> text; thrust::device_vector<index_type> bwt; thrust::device_vector<index_type> occ; thrust::device_vector<index_type> bwt_occ; thrust::device_vector<index_type> L2; thrust::device_vector<uint32> count_table; thrust::device_vector<uint32> input; thrust::device_vector<uint32> output; DeviceData(const HostData<index_type>& data) : primary( data.primary ), text( data.text ), bwt( data.bwt ), occ( data.occ ), bwt_occ( data.bwt_occ ), L2( data.L2 ), count_table( data.count_table ), input( data.input ), output( data.output ) {} }; template <uint32 OCC_INT, uint32 SA_INT, typename BwtIterator, typename OccIterator, typename SSA, typename index_type> void do_synthetic_test_device( const uint32 REQS, const uint32 LEN, const uint32 PLEN, const HostData<index_type>& host_data, const SSA& ssa, DeviceData<index_type>& device_data, const OccIterator occ_it, const BwtIterator bwt_it) { typedef cuda::ldg_pointer<uint32> count_table_type; const count_table_type count_table( thrust::raw_pointer_cast( &device_data.count_table.front() ) ); typedef PackedStream<BwtIterator,uint8,2u,true,index_type> bwt_type; typedef rank_dictionary< 2u, OCC_INT, bwt_type, OccIterator, count_table_type > rank_dict_type; rank_dict_type rank_dict( bwt_type( bwt_it ), occ_it, count_table ); typedef SSA_index_multiple_context<SA_INT,const index_type*> ssa_type; typedef fm_index< rank_dict_type, ssa_type > 
fm_index_type; fm_index_type temp_fmi( LEN, device_data.primary, thrust::raw_pointer_cast( &device_data.L2.front() ), rank_dict, ssa_type() ); //SSA_value_multiple_device ssa_device( ssa ); //SSA_index_multiple_device<SA_INT> ssa_device( ssa ); fprintf(stderr, " SSA gpu... started\n" ); Timer timer; timer.start(); SSA_index_multiple_device<SA_INT,index_type> ssa_device( temp_fmi ); timer.stop(); fprintf(stderr, " SSA gpu... done: %.3fs\n", timer.seconds() ); // test the gpu SSA against the cpu one test_ssa( ssa_device, ssa ); fprintf(stderr, " gpu alignment... started\n"); fm_index_type fmi( LEN, device_data.primary, thrust::raw_pointer_cast( &device_data.L2.front() ), rank_dict, ssa_device.get_context() ); cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); const uint32 BLOCK_SIZE = 256; const uint32 n_blocks = (REQS + BLOCK_SIZE-1) / BLOCK_SIZE; locate_kernel<OCC_INT> <<<n_blocks,BLOCK_SIZE>>>( REQS, PLEN, LEN, thrust::raw_pointer_cast( &device_data.text.front() ), fmi, thrust::raw_pointer_cast( &device_data.input.front() ), thrust::raw_pointer_cast( &device_data.output.front() ) ); cudaThreadSynchronize(); float time; cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); fprintf(stderr, " gpu alignment... 
done: %.1fms, A/s: %.2f M\n", time, REQS/(time*1000.0f) ); thrust::host_vector<uint32> output_h( device_data.output ); for (uint32 i = 0; i < REQS; ++i) { if (host_data.output[i] != output_h[i]) { fprintf(stderr, "\nerror : mismatch at %u: expected %u, got %u\n", i, host_data.output[i], output_h[i] ); exit(1); } } } template <uint32 OCC_INT, uint32 SA_INT, typename SSA> void synthetic_test_device( const uint32 REQS, const uint32 LEN, const uint32 PLEN, const uint32 WORDS, const uint32 OCC_WORDS, const HostData<uint32>& host_data, const SSA& ssa) { try { DeviceData<uint32> device_data( host_data ); // test an FM-index with separate bwt/occ tables { typedef cuda::ldg_pointer<uint4> iterator_type; iterator_type occ_it( (const uint4*)thrust::raw_pointer_cast( &device_data.occ.front() ) ); iterator_type bwt_it( (const uint4*)thrust::raw_pointer_cast( &device_data.bwt.front() ) ); do_synthetic_test_device<OCC_INT, SA_INT>( REQS, LEN, PLEN, host_data, ssa, device_data, occ_it, bwt_it ); } // test an FM-index with interleaved bwt/occ tables if (WORDS == OCC_WORDS) { typedef cuda::ldg_pointer<uint4> bwt_occ_texture; bwt_occ_texture bwt_occ_tex( (const uint4*)thrust::raw_pointer_cast( &device_data.bwt_occ.front() ) ); typedef deinterleaved_iterator<2,0,bwt_occ_texture> bwt_iterator; typedef deinterleaved_iterator<2,1,bwt_occ_texture> occ_iterator; occ_iterator occ_it( bwt_occ_tex ); bwt_iterator bwt_it( bwt_occ_tex ); do_synthetic_test_device<OCC_INT, SA_INT>( REQS, LEN, PLEN, host_data, ssa, device_data, occ_it, bwt_it ); } } catch (std::exception exception) { fprintf(stderr, " \nerror : exception caught : %s\n", exception.what()); exit(1); } catch (...) 
{ fprintf(stderr, " \nerror : unknown exception\n"); exit(1); } } template <uint32 OCC_INT, uint32 SA_INT, typename SSA> void synthetic_test_device( const uint32 REQS, const uint32 LEN, const uint32 PLEN, const uint32 WORDS, const uint32 OCC_WORDS, const HostData<uint64>& host_data, const SSA& ssa) { try { DeviceData<uint64> device_data( host_data ); // test an FM-index with separate bwt/occ tables { typedef cuda::ldg_pointer<uint64> iterator_type; iterator_type occ_it( (const uint64*)thrust::raw_pointer_cast( &device_data.occ.front() ) ); iterator_type bwt_it( (const uint64*)thrust::raw_pointer_cast( &device_data.bwt.front() ) ); do_synthetic_test_device<OCC_INT, SA_INT>( REQS, LEN, PLEN, host_data, ssa, device_data, occ_it, bwt_it ); } // test an FM-index with interleaved bwt/occ tables if (WORDS == OCC_WORDS) { typedef cuda::ldg_pointer<uint64> bwt_occ_texture; bwt_occ_texture bwt_occ_tex( (const uint64*)thrust::raw_pointer_cast( &device_data.bwt_occ.front() ) ); typedef deinterleaved_iterator<2,0,bwt_occ_texture> bwt_iterator; typedef deinterleaved_iterator<2,1,bwt_occ_texture> occ_iterator; occ_iterator occ_it( bwt_occ_tex ); bwt_iterator bwt_it( bwt_occ_tex ); do_synthetic_test_device<OCC_INT, SA_INT>( REQS, LEN, PLEN, host_data, ssa, device_data, occ_it, bwt_it ); } } catch (std::exception exception) { fprintf(stderr, " \nerror : exception caught : %s\n", exception.what()); exit(1); } catch (...) { fprintf(stderr, " \nerror : unknown exception\n"); exit(1); } } // perform an alignment test on the cpu // template < typename TextType, typename FMIndexType, typename index_type> void synthetic_test_host( const uint32 REQS, const uint32 PLEN, const TextType text, const FMIndexType fmi, HostData<index_type>& data) { fprintf(stderr, " cpu alignment... started" ); typedef typename FMIndexType::range_type range_type; Timer timer; timer.start(); for (uint32 i = 0; i < REQS; ++i) { if ((i & 1023) == 0) fprintf(stderr, "\r cpu alignment... 
started: %.1f%% ", 100.0f*float(i)/float(REQS) ); const range_type range = match( fmi, text + data.input[i], PLEN ); if (range.y < range.x) { fprintf(stderr, " \nerror: unable to match pattern %u\n", data.input[i]); exit(1); } data.output[i] = uint32( locate( fmi, range.x ) ); } timer.stop(); fprintf(stderr, "\n cpu alignment... done: %.1fms, A/s: %.2f M\n", timer.seconds()*1000.0f, REQS/(timer.seconds()*1.0e6f) ); } } // anonymous namespace template <typename index_type> void synthetic_test(const uint32 LEN, const uint32 QUERIES) { fprintf(stderr, " %u-bits synthetic test\n", uint32(sizeof(index_type)*8)); const uint32 OCC_INT = sizeof(index_type) == sizeof(uint32) ? 64 : 128; const uint32 SA_INT = 32; const uint32 SYM_PER_WORD = 4*sizeof(index_type); const uint32 PLEN = 8; const uint32 REQS = nvbio::min( uint32(LEN-PLEN-1u), QUERIES ); const uint32 WORDS = (LEN+SYM_PER_WORD-1)/SYM_PER_WORD; const uint32 OCC_WORDS = ((LEN+OCC_INT-1) / OCC_INT) * 4; Timer timer; const uint64 memory_footprint = sizeof(index_type)*WORDS + sizeof(index_type)*WORDS + sizeof(index_type)*OCC_WORDS + sizeof(index_type)*uint64(LEN+SA_INT)/SA_INT; fprintf(stderr, " memory : %.1f MB\n", float(memory_footprint)/float(1024*1024)); HostData<index_type> data; data.text.resize( align<4>(WORDS), 0u ); data.bwt.resize( align<4>(WORDS), 0u ); data.occ.resize( align<4>(OCC_WORDS), 0u ); data.L2.resize( 5 ); data.count_table.resize( 256 ); data.input.resize( REQS ); data.output.resize( REQS ); typedef PackedStream<index_type*,uint8,2,true,index_type> stream_type; stream_type text( &data.text[0] ); for (uint32 i = 0; i < LEN; ++i) text[i] = (rand() % 4); // print the string if (LEN < 64) { char string[64]; dna_to_string( text, text + LEN, string ); fprintf(stderr, " string : %s\n", string); } // generate the suffix array std::vector<int32> sa( LEN+1, 0u ); gen_sa( LEN, text, &sa[0] ); stream_type bwt( &data.bwt[0] ); data.primary = gen_bwt_from_sa( LEN, text, &sa[0], bwt ); // set sa[0] to -1 so as to 
get a modulo for free sa[0] = -1; // print the string if (LEN < 64) { char string[64]; dna_to_string( bwt, bwt + LEN, string ); fprintf(stderr, " bwt : %s\n", string); } fprintf(stderr," primary : %d\n", data.primary ); // buld the occurrence table build_occurrence_table<2u,OCC_INT>( bwt, bwt + LEN, &data.occ[0], &data.L2[1] ); // transform the L2 table into a cumulative sum data.L2[0] = 0; for (uint32 c = 0; c < 4; ++c) data.L2[c+1] += data.L2[c]; // print the L2 if (LEN < 64) { for (uint32 i = 0; i < 5; ++i) fprintf(stderr, " L2[%u] : %u\n", i, uint32( data.L2[i] )); } // generate the count table gen_bwt_count_table( &data.count_table[0] ); // build the interleaved bwt/occ array if (WORDS == OCC_WORDS) { fprintf(stderr, " building interleaved bwt/occ... started\n" ); data.bwt_occ.resize( WORDS*2 ); if (sizeof(index_type) == 4) { for (uint32 w = 0; w < WORDS; w += 4) { data.bwt_occ[ w*2+0 ] = data.bwt[ w+0 ]; data.bwt_occ[ w*2+1 ] = data.bwt[ w+1 ]; data.bwt_occ[ w*2+2 ] = data.bwt[ w+2 ]; data.bwt_occ[ w*2+3 ] = data.bwt[ w+3 ]; data.bwt_occ[ w*2+4 ] = data.occ[ w+0 ]; data.bwt_occ[ w*2+5 ] = data.occ[ w+1 ]; data.bwt_occ[ w*2+6 ] = data.occ[ w+2 ]; data.bwt_occ[ w*2+7 ] = data.occ[ w+3 ]; } } else { for (uint32 w = 0; w < WORDS; ++w) { data.bwt_occ[ w*2+0 ] = data.bwt[ w ]; data.bwt_occ[ w*2+1 ] = data.occ[ w ]; } } fprintf(stderr, " building interleaved bwt/occ... 
done\n" ); } typedef PackedStream<const index_type*,uint8,2u,true,index_type> bwt_type; typedef rank_dictionary<2u, OCC_INT, bwt_type, const index_type*, const uint32*> rank_dict_type; typedef fm_index<rank_dict_type, ssa_nop> temp_fm_index_type; temp_fm_index_type temp_fmi( LEN, data.primary, &data.L2[0], rank_dict_type( bwt_type( &data.bwt[0] ), &data.occ[0], &data.count_table[0] ), ssa_nop() ); #if 0 // test the Sampled Suffix Array class typedef SSA_value_multiple SSA_type; SSA_value_multiple ssa( temp_fmi, SA_INT ); SSA_value_multiple::context_type ssa_context = ssa.get_context(); #else // test the Sampled Suffix Array class typedef SSA_index_multiple<SA_INT,index_type> SSA_type; timer.start(); SSA_type ssa( temp_fmi ); timer.stop(); fprintf(stderr, " SSA cpu time: %.3fs\n", timer.seconds() ); typename SSA_type::context_type ssa_context = ssa.get_context(); #endif fprintf(stderr, " SSA test... started\n" ); for (uint32 i = 1; i < LEN; ++i) { index_type val; if (ssa_context.fetch( index_type(i), val ) && (val != (uint32)sa[i])) { fprintf(stderr, " SSA mismatch at %u: expected %d, got: %u\n", i, uint32( sa[i] ), uint32( val )); exit(1); } } fprintf(stderr, " SSA test... done\n" ); typedef fm_index<rank_dict_type, typename SSA_type::context_type> fm_index_type; fm_index_type fmi( LEN, data.primary, &data.L2[0], rank_dict_type( bwt_type( &data.bwt[0] ), &data.occ[0], &data.count_table[0] ), ssa_context ); typedef typename fm_index_type::range_type range_type; uint8 pattern[PLEN]; char pattern_str[PLEN+1]; fprintf(stderr, " alignment test... started:" ); for (uint32 i = 0; i < 1000; ++i) { fprintf(stderr, "\r alignment test... 
started: %.1f%% ", 100.0f*float(i)/1000.0f ); for (uint32 j = 0; j < PLEN; ++j) pattern[j] = text[i+j]; dna_to_string( pattern, pattern + PLEN, pattern_str ); range_type range = match( fmi, pattern, PLEN ); if (range.x > range.y) { fprintf(stderr, " \nerror : searching for %s @ %u, resulted in (%u,%u)\n", pattern_str, i, uint32( range.x ), uint32( range.y )); exit(1); } // locate the first 100 alignments range.y = nvbio::min( range.x + 10u, range.y ); for (index_type x = range.x; x <= range.y; ++x) { const uint32 prefix = locate( fmi, x ); if (prefix >= LEN) { const range_type inv = inv_psi( fmi, x ); fprintf(stderr, " \nerror : searching for %s @ %u, resulted in prefix out of bounds: %u (= sa[%u] + %u)\n", pattern_str, i, prefix, uint32(inv.x), uint32(inv.y)); exit(1); } char found_str[PLEN+1]; dna_to_string( text + prefix, text + prefix + PLEN, found_str ); if (strcmp( found_str, pattern_str ) != 0) { const range_type inv = inv_psi( fmi, x ); fprintf(stderr, " \nerror : locating %s @ %u at SA=%u in SA(%u,%u), resulted in %s @ %u (= sa[%u] + %u)\n", pattern_str, i, uint32( x ), uint32( range.x ), uint32( range.y ), found_str, prefix, uint32(inv.x), uint32(inv.y)); exit(1); } /*{ const uint2 inv = inv_psi( fmi, x ); fprintf(stderr, " locating %s @ %u at %u, matched at %u (= sa[%u] + %u)\n", pattern_str, i, x, prefix, inv.x, inv.y); }*/ } } fprintf(stderr, "\n alignment test... done\n" ); const uint32 SPARSITY = 100; data.input[0] = 0; for (uint32 i = 1; i < REQS; ++i) data.input[i] = (data.input[i-1] + (rand() % SPARSITY)) % (LEN - PLEN); fprintf(stderr, " sorted alignment tests... started\n" ); synthetic_test_host( REQS, PLEN, text, fmi, data ); synthetic_test_device<OCC_INT,SA_INT>( REQS, LEN, PLEN, WORDS, OCC_WORDS, data, ssa ); fprintf(stderr, " sorted alignment tests... done\n" ); fprintf(stderr, " shuffled alignment tests... 
started\n" ); for (uint32 i = 0; i < REQS; ++i) { const uint32 j = i + rand() % (REQS - i); std::swap( data.input[i], data.input[j] ); } synthetic_test_host( REQS, PLEN, text, fmi, data ); synthetic_test_device<OCC_INT,SA_INT>( REQS, LEN, PLEN, WORDS, OCC_WORDS, data, ssa ); fprintf(stderr, " shuffled alignment tests... done\n" ); } // // A backtracking delegate used to count the total number of occurrences // struct CountDelegate { // constructor // // \param count pointer to the global counter NVBIO_FORCEINLINE NVBIO_HOST_DEVICE CountDelegate(uint32* count) : m_count( count ) {} // main functor operator // NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void operator() (const uint2 range) const { #if defined(NVBIO_DEVICE_COMPILATION) atomicAdd( m_count, range.y + 1u - range.x ); #else *m_count += range.y + 1u - range.x; #endif } private: uint32* m_count; // global counter }; // // k-mer counting kernel // template <typename ReadsView, typename FMIndexType> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void count_core( const uint32 read_id, // read id const ReadsView reads, // reads view const FMIndexType fmi, // FM-index const uint32 len, // pattern length const uint32 seed, // exact-matching seed length const uint32 mismatches, // number of allowed mismatches after the seed uint32* count) // global output counter { CountDelegate counter( count ); typedef typename ReadsView::sequence_stream_type read_stream_type; uint4 stack[32*4]; hamming_backtrack( fmi, reads.get_read( read_id ).begin(), len, seed, mismatches, stack, counter ); } // // k-mer counting kernel // template <typename ReadsView, typename FMIndexType> __global__ void count_kernel( const ReadsView reads, // reads view const FMIndexType fmi, // FM-index const uint32 len, // pattern length const uint32 seed, // exact-matching seed length const uint32 mismatches, // number of allowed mismatches after the seed uint32* count) // global output counter { const uint32 thread_id = threadIdx.x + blockIdx.x*blockDim.x; if (thread_id 
>= reads.size()) return; count_core( thread_id, reads, fmi, len, seed, mismatches, count ); } // // run a set of backtracking tests with real data // void backtrack_test(const char* index_file, const char* reads_name, const uint32 n_reads) { io::FMIndexDataHost h_fmi; if (h_fmi.load( index_file, io::FMIndexData::FORWARD )) { typedef io::FMIndexData::partial_fm_index_type host_fmindex_type; typedef io::FMIndexDataDevice::fm_index_type cuda_fmindex_type; io::FMIndexDataDevice d_fmi( h_fmi, io::FMIndexDataDevice::FORWARD ); host_fmindex_type h_fmindex = h_fmi.partial_index(); cuda_fmindex_type d_fmindex = d_fmi.index(); io::SequenceDataStream* reads_file = io::open_sequence_file( reads_name, io::Phred, n_reads, 50 ); if (reads_file == NULL) { log_error(stderr, "unable to load \"%s\"\n", reads_name); exit(1); } // create a host-side read batch io::SequenceDataHost h_reads_data; // load a batch if (io::next( DNA_N, &h_reads_data, reads_file, n_reads ) == 0) { log_error(stderr, "unable to fetch reads from file \"%s\"\n", reads_name); exit(1); } // create a device-side read_batch const io::SequenceDataDevice d_reads_data( h_reads_data ); // create a host-side read batch typedef io::SequenceDataAccess<DNA_N> read_access_type; // create a read access const read_access_type h_reads_view( h_reads_data ); const read_access_type d_reads_view( d_reads_data ); thrust::device_vector<uint32> counter(1); counter[0] = 0; const uint32 blockdim = 128; const uint32 n_blocks = (d_reads_data.size() + blockdim - 1) / blockdim; // 20-mers, distance=0 { cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); count_kernel<<<n_blocks,blockdim>>>( d_reads_view, d_fmindex, 20u, 0u, 0u, thrust::raw_pointer_cast( &counter.front() ) ); cudaThreadSynchronize(); nvbio::cuda::check_error("count_kernel"); float time; cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); fprintf(stderr, " gpu backtracking 
(20,0,0)... done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } { Timer timer; timer.start(); uint32 counter = 0; #pragma omp parallel for for (int i = 0; i < (int)h_reads_data.size(); ++i) { count_core( i, h_reads_view, h_fmindex, 20u, 0u, 0u, &counter ); } timer.stop(); float time = timer.seconds() * 1000.0f; fprintf(stderr, " cpu backtracking (20,0,0)... done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } // 32-mers, distance=1 { cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); count_kernel<<<n_blocks,blockdim>>>( d_reads_view, d_fmindex, 32u, 0u, 1u, thrust::raw_pointer_cast( &counter.front() ) ); cudaThreadSynchronize(); nvbio::cuda::check_error("count_kernel"); float time; cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); fprintf(stderr, " gpu backtracking (32,1,0)... done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } { Timer timer; timer.start(); uint32 counter = 0; #pragma omp parallel for for (int i = 0; i < (int)h_reads_data.size(); ++i) { count_core( i, h_reads_view, h_fmindex, 32u, 0u, 1u, &counter ); } timer.stop(); float time = timer.seconds() * 1000.0f; fprintf(stderr, " cpu backtracking (32,1,0)... done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } // 50-mers, distance=2, seed=25 { cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); count_kernel<<<n_blocks,blockdim>>>( d_reads_view, d_fmindex, 50u, 25u, 2u, thrust::raw_pointer_cast( &counter.front() ) ); cudaThreadSynchronize(); nvbio::cuda::check_error("count_kernel"); float time; cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); fprintf(stderr, " gpu backtracking (50,2,25)... 
done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } { Timer timer; timer.start(); uint32 counter = 0; #pragma omp parallel for for (int i = 0; i < (int)h_reads_data.size(); ++i) { count_core( i, h_reads_view, h_fmindex, 50u, 25u, 2u, &counter ); } timer.stop(); float time = timer.seconds() * 1000.0f; fprintf(stderr, " cpu backtracking (52,2,25)... done: %.1fms, A/s: %.3f M\n", time, d_reads_data.size()/(time*1000.0f) ); } delete reads_file; } else log_warning(stderr, "unable to load \"%s\"\n", index_file); } int fmindex_test(int argc, char* argv[]) { uint32 synth_len = 10000000; uint32 synth_queries = 64*1024; const char* index_name = "./data/human.NCBI36/Human.NCBI36"; const char* reads_name = "./data/SRR493095_1.fastq.gz"; uint32 backtrack_queries = 64*1024; uint32 threads = omp_get_num_procs(); for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-synth-length" ) == 0) synth_len = atoi( argv[++i] )*1000; else if (strcmp( argv[i], "-synth-queries" ) == 0) synth_queries = atoi( argv[++i] )*1000; else if (strcmp( argv[i], "-backtrack-queries" ) == 0) backtrack_queries = atoi( argv[++i] ) * 1024; else if (strcmp( argv[i], "-index" ) == 0) index_name = argv[++i]; else if (strcmp( argv[i], "-reads" ) == 0) reads_name = argv[++i]; else if (strcmp( argv[i], "-threads" ) == 0) threads = atoi( argv[++i] ); } omp_set_num_threads( threads ); fprintf(stderr, "FM-index test... started\n"); if (synth_len && synth_queries) { synthetic_test<uint32>( synth_len, synth_queries ); synthetic_test<uint64>( synth_len, synth_queries ); } if (backtrack_queries) backtrack_test( index_name, reads_name, backtrack_queries ); fprintf(stderr, "FM-index test... done\n"); return 0; }
the_stack
#include <stdint.h>

namespace cufhe {

/**
 * @class FFP
 * @brief Wraps a uint64_t integer as an element of the prime field FF(P),
 *        P = 2^64 - 2^32 + 1, and defines arithmetic operations on it.
 * @details Do not use the function Pow(...) in performance-critical code.
 * @details All member functions are inline.
 * @details Consider all arithmetic functions as device-only: they are
 *          implemented with inline PTX assembly.
 */
class FFP {

private:

  /** Field modulus P = 2^64 - 2^32 + 1. */
  static const uint64_t kModulus_ = 0xffffffff00000001UL;

  /** A 2^32-th primitive root of unity mod P. */
  static const uint64_t kRoot2e32_ = 0xa70dc47e4cbdf43fUL;

  /** The stored value, a 64-bit unsigned integer kept within [0, P-1]. */
  uint64_t val_;

  /**
   * Conditional reduction: if x >= P, add P (wrapping) so the result is the
   * canonical representative of x mod P.
   * Cannot avoid divergence, since comparison causes branches in CUDA;
   * the PTX `set` idiom below is branch-free instead.
   */
  __device__ inline void ModP(uint64_t& x) {
    asm("{\n\t"
        ".reg .u32 m;\n\t"
        ".reg .u64 t;\n\t"
        // m = 0xffffffff if x >= P else 0
        "set.ge.u32.u64 m, %0, %1;\n\t"
        // t = (uint64_t)m; x += t  (adding 2^32-1 == adding P mod 2^64 folded)
        "mov.b64 t, {m, 0};\n\t"
        "add.u64 %0, %0, t;\n\t"
        "}"
        : "+l"(x)
        : "l"(kModulus_));
  }

public:

  //////////////////////////////////////////////////////////////////////////
  // Access and set value

  /** Default constructor. Value is not specified. */
  __host__ __device__ inline FFP() {} // shared memory requires an empty constructor

  /**
   * Construct from an unsigned integer a; value is a (mod P).
   * Immediates are uint32_t unless suffixed otherwise.
   */
  __host__ __device__ inline FFP(uint8_t a)  { val_ = a; }
  __host__ __device__ inline FFP(uint16_t a) { val_ = a; }
  __host__ __device__ inline FFP(uint32_t a) { val_ = a; }
  __host__ __device__ inline FFP(uint64_t a) { val_ = a; }

  /**
   * Construct from a signed integer a.
   * For negative a, subtracting 0xffffffff from the sign-extended value maps
   * a to a + P (e.g. a = -1 becomes P - 1), i.e. a mod P.
   */
  __host__ __device__ inline FFP(int8_t a)  { val_ = (uint64_t)a - (uint32_t)(-(a < 0)); }
  __host__ __device__ inline FFP(int16_t a) { val_ = (uint64_t)a - (uint32_t)(-(a < 0)); }
  __host__ __device__ inline FFP(int32_t a) { val_ = (uint64_t)a - (uint32_t)(-(a < 0)); }
  __host__ __device__ inline FFP(int64_t a) { val_ = (uint64_t)a - (uint32_t)(-(a < 0)); }

  /** Default destructor. Value is not wiped. */
  __host__ __device__ inline ~FFP() {}

  /** Get value (mutable reference). */
  __host__ __device__ inline uint64_t& val() { return val_; }

  /** Get value (const reference). */
  __host__ __device__ inline const uint64_t& val() const { return val_; }

  /** Return the field modulus P. */
  __host__ __device__ inline static uint64_t kModulus() { return kModulus_; }

  /** Return the 2^32-th primitive root of unity mod P. */
  __host__ __device__ inline static uint64_t kRoot2e32() { return kRoot2e32_; }

  //////////////////////////////////////////////////////////////////////////
  // Operators

  /** Assignment from integers: same reduction rules as the constructors. */
  __host__ __device__ inline FFP& operator=(uint8_t a)  { this->val_ = a; return *this; }
  __host__ __device__ inline FFP& operator=(uint16_t a) { this->val_ = a; return *this; }
  __host__ __device__ inline FFP& operator=(uint32_t a) { this->val_ = a; return *this; }
  __host__ __device__ inline FFP& operator=(uint64_t a) { this->val_ = a; return *this; }
  __host__ __device__ inline FFP& operator=(int8_t a)  { this->val_ = (uint64_t)a - (uint32_t)(-(a < 0)); return *this; }
  __host__ __device__ inline FFP& operator=(int16_t a) { this->val_ = (uint64_t)a - (uint32_t)(-(a < 0)); return *this; }
  __host__ __device__ inline FFP& operator=(int32_t a) { this->val_ = (uint64_t)a - (uint32_t)(-(a < 0)); return *this; }
  __host__ __device__ inline FFP& operator=(int64_t a) { this->val_ = (uint64_t)a - (uint32_t)(-(a < 0)); return *this; }
  __host__ __device__ inline FFP& operator=(FFP a) { this->val_ = a.val(); return *this; }

  /** Explicit conversions. Narrower targets get a truncated result. */
  __host__ __device__ inline explicit operator uint64_t() { return val_; }           // exact value
  __host__ __device__ inline explicit operator uint8_t()  { return (uint8_t)val_; }  // truncated
  __host__ __device__ inline explicit operator uint16_t() { return (uint16_t)val_; } // truncated
  __host__ __device__ inline explicit operator uint32_t() { return (uint32_t)val_; } // truncated

  /** Addition in FF(P): val_ = val_ + a mod P. */
  __device__ inline FFP& operator+=(const FFP& a) {
    this->Add(*this, a);
    return *this;
  }

  /** Addition in FF(P): return a + b mod P. */
  friend __device__ inline FFP operator+(const FFP& a, const FFP& b) {
    FFP r;
    r.Add(a, b);
    return r;
  }

  /** Subtraction in FF(P): val_ = val_ - a mod P. */
  __device__ inline FFP& operator-=(const FFP& a) {
    this->Sub(*this, a);
    return *this;
  }

  /** Subtraction in FF(P): return a - b mod P. */
  friend __device__ inline FFP operator-(const FFP& a, const FFP& b) {
    FFP r;
    r.Sub(a, b);
    return r;
  }

  /** Multiplication in FF(P): val_ = val_ * a mod P. */
  __device__ inline FFP& operator*=(const FFP& a) {
    this->Mul(*this, a);
    return *this;
  }

  /** Multiplication in FF(P): return a * b mod P. */
  friend __device__ inline FFP operator*(const FFP& a, const FFP& b) {
    FFP r;
    r.Mul(a, b);
    return r;
  }

  /** Equality (compares raw representatives). */
  __host__ __device__ inline bool operator==(const FFP& other) {
    return (bool)(val_ == other.val());
  }

  /** Inequality (compares raw representatives). */
  __host__ __device__ inline bool operator!=(const FFP& other) {
    return (bool)(val_ != other.val());
  }

  //////////////////////////////////////////////////////////////////////////
  // Miscellaneous

  /**
   * Return a primitive n-th root of unity in FF(P): Root(n) ^ n = 1 mod P.
   * @param[in] n A power of 2 (n divides 2^32).
   * NOTE: shift must be done in 64 bits; `1ULL` avoids the undefined
   * behavior of `0x1UL << 32` on platforms where unsigned long is 32-bit.
   */
  __device__ inline static FFP Root(uint32_t n) {
    return Pow(kRoot2e32_, (uint32_t)((1ULL << 32) / n));
  }

  /**
   * Return the inverse of 2^log_n in FF(P): 2^{-log_n} mod P.
   * @param log_n An integer in [1, 32]; the closed form below is only a
   *        valid inverse on that range (log_n = 0 yields P, i.e. 0 mod P).
   * Computed arithmetically instead of type-punning a uint32_t[2] through a
   * uint64_t pointer, which was endianness-dependent and undefined behavior
   * under strict aliasing; the 64-bit shift also removes the UB of
   * `0x1 << 32` when log_n == 0.
   */
  __host__ __device__ inline static FFP InvPow2(uint32_t log_n) {
    uint32_t lo = (uint32_t)((1ULL << (32 - log_n)) + 1u); // low 32 bits
    uint32_t hi = (uint32_t)(0u - lo);                     // high 32 bits = -lo
    return FFP(((uint64_t)hi << 32) | lo);
  }

  /** Exchange values with a. */
  __host__ __device__ inline void Swap(FFP& a) {
    uint64_t t = val_;
    val_ = a.val_;
    a.val_ = t;
  }

  //////////////////////////////////////////////////////////////////////////
  // Arithmetic

  /** Addition in FF(P): val_ = a + b mod P (branch-free PTX). */
  __device__ inline void Add(const FFP& a, const FFP& b) {
    asm("{\n\t"
        ".reg .u32 m;\n\t"
        ".reg .u64 t;\n\t"
        ".reg .pred p;\n\t"
        // this = a + b;
        "add.u64 %0, %1, %2;\n\t"
        // this += (uint32_t)(-(this < b || this >= FFP_MODULUS));
        "setp.lt.u64 p, %0, %2;\n\t"
        "set.ge.or.u32.u64 m, %0, %3, p;\n\t"
        "mov.b64 t, {m, 0};\n\t"
        "add.u64 %0, %0, t;\n\t"
        "}"
        : "+l"(val_)
        : "l"(a.val_), "l"(b.val_), "l"(kModulus_));
  }

  /** Subtraction in FF(P): val_ = a - b mod P (branch-free PTX). */
  __device__ inline void Sub(const FFP& a, const FFP& b) {
    // NOTE: the deprecated `register` keyword (removed in C++17) was dropped;
    // the compiler allocates registers regardless.
    uint64_t r = 0;
    asm("{\n\t"
        ".reg .u32 m;\n\t"
        ".reg .u64 t;\n\t"
        // this = a - b;
        "sub.u64 %0, %1, %2;\n\t"
        // this -= (uint32_t)(-(this > a));  (borrow correction)
        "set.gt.u32.u64 m, %0, %1;\n\t"
        "mov.b64 t, {m, 0};\n\t"
        "sub.u64 %0, %0, t;\n\t"
        "}"
        : "+l"(r)
        : "l"(a.val_), "l"(b.val_));
    val_ = r;
  }

  /**
   * Multiplication in FF(P): val_ = a * b mod P.
   * Full 128-bit product reduced via the special form of P using 32-bit
   * add/sub with carry, then a final conditional reduction.
   */
  __device__ inline void Mul(const FFP& a, const FFP& b) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 m0, m1, m2, m3;\n\t"
        ".reg .u64 t;\n\t"
        ".reg .pred p, q;\n\t"
        // 128-bit = 64-bit * 64-bit
        "mul.lo.u64 t, %1, %2;\n\t"
        "mov.b64 {m0, m1}, t;\n\t"
        "mul.hi.u64 t, %1, %2;\n\t"
        "mov.b64 {m2, m3}, t;\n\t"
        // 128-bit mod P with add / sub
        "add.u32 r1, m1, m2;\n\t"
        "sub.cc.u32 r0, m0, m2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "sub.cc.u32 r0, r0, m3;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // fix result
        "setp.eq.u32 p|q, m2, 0;\n\t"
        "mov.b64 t, {m0, m1};\n\t"
        // ret -= (uint32_t)(-(ret > mul[0] && m[2] == 0));
        "set.gt.and.u32.u64 m3, %0, t, p;\n\t"
        "sub.cc.u32 r0, r0, m3;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret < mul[0] && m[2] != 0));
        "set.lt.and.u32.u64 m3, %0, t, q;\n\t"
        "add.cc.u32 r0, r0, m3;\n\t"
        "addc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "l"(a.val_), "l"(b.val_));
    ModP(val_);
  }

  /**
   * \brief Exponentiation in FF(P): val_ = val_ ^ e mod P.
   * Square-and-multiply; not for performance-critical code.
   */
  __device__ inline void Pow(uint32_t e) {
    if (0 == e) {
      val_ = 1;
      return;
    }
    FFP y = 1;
    uint64_t n = (uint64_t)e;
    while (n > 1) {
      if (0 != (n & 0x1))
        y *= (*this);
      *this *= (*this);
      n >>= 1;
    }
    *this *= y;
  }

  /** \brief Exponentiation in FF(P): return a ^ e mod P. */
  __device__ inline static FFP Pow(const FFP& a, uint32_t e) {
    FFP r = a;
    r.Pow(e);
    return r;
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [0, 32)
   */
  __device__ inline void Lsh32(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        // t[2] = (uint32_t)(x >> (64-l));
        // t[1] = (uint32_t)(x >> (32-l));
        // t[0] = (uint32_t)(x << l);
        "mov.b64 {r0, r1}, %0;\n\t"
        "shl.b32 t0, r0, %1;\n\t"
        "sub.u32 n, 32, %1;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t1, t2}, s;\n\t"
        // mod P
        "add.u32 r1, t1, t2;\n\t"
        "sub.cc.u32 r0, t0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret < ((uint64_t *)t)[0]));
        "mov.b64 s, {t0, t1};\n\t"
        "set.lt.u32.u64 t2, %0, s;\n\t"
        "add.cc.u32 r0, r0, t2;\n\t"
        "addc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [32, 64)
   */
  __device__ inline void Lsh64(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        ".reg .pred p, q;\n\t"
        // t[2] = (uint32_t)(x >> (96-l));
        // t[1] = (uint32_t)(x >> (64-l));
        // t[0] = (uint32_t)(x << (l-32));
        "mov.b64 {r0, r1}, %0;\n\t"
        "sub.u32 n, %1, 32;\n\t"
        "shl.b32 t0, r0, n;\n\t"
        "sub.u32 n, 32, n;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t1, t2}, s;\n\t"
        // mod P
        "add.u32 r1, t0, t1;\n\t"
        "sub.cc.u32 r0, 0, t1;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret -= (uint32_t)(-(ret > ((uint64_t)t[0] << 32) && t[1] == 0));
        "setp.eq.u32 p|q, t1, 0;\n\t"
        "mov.b64 s, {0, t0};\n\t"
        "set.gt.and.u32.u64 t2, %0, s, p;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret < ((uint64_t)t[0] << 32) && t[1] != 0));
        "set.lt.and.u32.u64 t2, %0, s, q;\n\t"
        "add.cc.u32 r0, r0, t2;\n\t"
        "addc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [64, 96)
   * Result is negated after reduction (2^l mod P is negative in this range).
   */
  __device__ inline void Lsh96(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        // t[2] = (uint32_t)(x >> (128-l));
        // t[1] = (uint32_t)(x >> (96-l));
        // t[0] = (uint32_t)(x << (l-64));
        "mov.b64 {r0, r1}, %0;\n\t"
        "sub.u32 n, %1, 64;\n\t"
        "shl.b32 t0, r0, n;\n\t"
        "sub.u32 n, 32, n;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t1, t2}, s;\n\t"
        // mod P
        "add.cc.u32 r0, t1, t0;\n\t"
        "addc.u32 r1, t2, 0;\n\t"
        "sub.u32 r1, r1, t0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret -= (uint32_t)(-(ret > ((uint64_t *)t)[1]));
        "mov.b64 s, {t1, t2};\n\t"
        "set.gt.u32.u64 t2, %0, s;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
    val_ = kModulus_ - val_;
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [96, 128)
   * Result is negated after reduction (2^l mod P is negative in this range).
   */
  __device__ inline void Lsh128(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        // t[2] = (uint32_t)(x >> (160-l));
        // t[1] = (uint32_t)(x >> (128-l));
        // t[0] = (uint32_t)(x << (l-96));
        "mov.b64 {r0, r1}, %0;\n\t"
        "sub.u32 n, %1, 96;\n\t"
        "shl.b32 t0, r0, n;\n\t"
        "sub.u32 n, 32, n;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t1, t2}, s;\n\t"
        // mod P
        "add.u32 r1, t1, t2;\n\t"
        "sub.cc.u32 r0, t0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret < ((uint64_t *)t)[0]));
        "mov.b64 s, {t0, t1};\n\t"
        "set.lt.u32.u64 t2, %0, s;\n\t"
        "add.cc.u32 r0, r0, t2;\n\t"
        "addc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
    val_ = kModulus_ - val_;
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [128, 160)
   * Result is negated after reduction (2^l mod P is negative in this range).
   */
  __device__ inline void Lsh160(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        ".reg .pred p, q;\n\t"
        // t[2] = (uint32_t)(x >> (192-l));
        // t[1] = (uint32_t)(x >> (160-l));
        // t[0] = (uint32_t)(x << (l-128));
        "mov.b64 {r0, r1}, %0;\n\t"
        "sub.u32 n, %1, 128;\n\t"
        "shl.b32 t0, r0, n;\n\t"
        "sub.u32 n, 32, n;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t1, t2}, s;\n\t"
        // mod P
        "add.u32 r1, t0, t1;\n\t"
        "sub.cc.u32 r0, 0, t1;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret -= (uint32_t)(-(ret > ((uint64_t)t[0] << 32) && t[1] == 0));
        "setp.eq.u32 p|q, t1, 0;\n\t"
        "mov.b64 s, {0, t0};\n\t"
        "set.gt.and.u32.u64 t2, %0, s, p;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret < ((uint64_t)t[0] << 32) && t[1] != 0));
        "set.lt.and.u32.u64 t2, %0, s, q;\n\t"
        "add.cc.u32 r0, r0, t2;\n\t"
        "addc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
    val_ = kModulus_ - val_;
  }

  /**
   * Binary left shift in FF(P): val_ = val_ * 2^l mod P.
   * @param[in] l An integer in [160, 192)
   */
  __device__ inline void Lsh192(uint32_t l) {
    asm("{\n\t"
        ".reg .u32 r0, r1;\n\t"
        ".reg .u32 t0, t1, t2;\n\t"
        ".reg .u32 n;\n\t"
        ".reg .u64 s;\n\t"
        // t[2] = (uint32_t)(x << (l-160));
        // t[1] = (uint32_t)(x >> (224-l));
        // t[0] = (uint32_t)(x >> (192-l));
        "mov.b64 {r0, r1}, %0;\n\t"
        "sub.u32 n, %1, 160;\n\t"
        "shl.b32 t2, r0, n;\n\t"
        "sub.u32 n, 32, n;\n\t"
        "shr.b64 s, %0, n;\n\t"
        "mov.b64 {t0, t1}, s;\n\t"
        // mod P
        "add.cc.u32 r0, t0, t2;\n\t"
        "addc.u32 r1, t1, 0;\n\t"
        "sub.u32 r1, r1, t2;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        // ret += (uint32_t)(-(ret > ((uint64_t *)t)[0]));
        "mov.b64 s, {t0, t1};\n\t"
        "set.gt.u32.u64 t2, %0, s;\n\t"
        "sub.cc.u32 r0, r0, t2;\n\t"
        "subc.u32 r1, r1, 0;\n\t"
        "mov.b64 %0, {r0, r1};\n\t"
        "}"
        : "+l"(val_)
        : "r"(l));
    // ret += (uint32_t)(-(ret >= FFP_MODULUS));
    ModP(val_);
  }

}; // class FFP

} // namespace cufhe
the_stack
#include <string.h> #include <stdint.h> #include "sph/blake2s.h" #include "sph/sph_types.h" #include "cuda_helper.h" #include "cuda_vectors.h" #ifdef __INTELLISENSE__ #define __byte_perm(x, y, b) x #endif static const uint32_t blake2s_IV[8] = { 0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL, 0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL }; static const uint8_t blake2s_sigma[10][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } , { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } , { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } , { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } , { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } , { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } , { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } , { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } , { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } }; #define G(r,i,a,b,c,d) \ do { \ a = a + b + m[blake2s_sigma[r][2*i+0]]; \ d = SPH_ROTR32(d ^ a, 16); \ c = c + d; \ b = SPH_ROTR32(b ^ c, 12); \ a = a + b + m[blake2s_sigma[r][2*i+1]]; \ d = SPH_ROTR32(d ^ a, 8); \ c = c + d; \ b = SPH_ROTR32(b ^ c, 7); \ } while(0) #define ROUND(r) \ do { \ G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \ G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \ G(r,2,v[ 2],v[ 6],v[10],v[14]); \ G(r,3,v[ 3],v[ 7],v[11],v[15]); \ G(r,4,v[ 0],v[ 5],v[10],v[15]); \ G(r,5,v[ 1],v[ 6],v[11],v[12]); \ G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \ G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while(0) extern "C" void blake2s_hash(void *output, const void *input){ uint32_t m[16]; uint32_t v[16]; uint32_t h[8]; uint32_t *in = (uint32_t*)input; // COMPRESS for(int i = 0; i < 16; ++i ) m[i] = in[i]; h[ 0] = 0x01010020 ^ blake2s_IV[0]; h[ 1] = blake2s_IV[1]; h[ 2] = blake2s_IV[2]; h[ 3] = blake2s_IV[3]; h[ 4] = blake2s_IV[4]; h[ 5] = blake2s_IV[5]; h[ 6] = blake2s_IV[6]; h[ 7] = blake2s_IV[7]; for(int i = 0; i < 8; ++i ) 
v[i] = h[i]; v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1]; v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3]; v[12] = 64 ^ blake2s_IV[4]; v[13] = blake2s_IV[5]; v[14] = blake2s_IV[6]; v[15] = blake2s_IV[7]; ROUND( 0 ); ROUND( 1 ); ROUND( 2 ); ROUND( 3 ); ROUND( 4 ); ROUND( 5 ); ROUND( 6 ); ROUND( 7 ); ROUND( 8 ); ROUND( 9 ); for( size_t i = 0; i < 8; ++i ) h[i] ^= v[i] ^ v[i + 8]; // COMPRESS m[0] = in[16]; m[1] = in[17]; m[2] = in[18]; m[3] = in[19]; for( size_t i = 4; i < 16; ++i ) m[i] = 0; for( size_t i = 0; i < 8; ++i ) v[i] = h[i]; v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1]; v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3]; v[12] = 0x50 ^ blake2s_IV[4]; v[13] = blake2s_IV[5]; v[14] = ~blake2s_IV[6]; v[15] = blake2s_IV[7]; ROUND( 0 ); ROUND( 1 ); ROUND( 2 ); ROUND( 3 ); ROUND( 4 ); ROUND( 5 ); ROUND( 6 ); ROUND( 7 ); ROUND( 8 ); ROUND( 9 ); for( size_t i = 0; i < 8; ++i ) h[i] ^= v[i] ^ v[i + 8]; memcpy(output, h, 32); } #define TPB 1024 #define NPT 256 #define maxResults 16 #define NBN 1 __constant__ uint32_t _ALIGN(16) midstate[20]; /* 16 adapters max */ static uint32_t *d_resNonce[MAX_GPUS]; static uint32_t *h_resNonce[MAX_GPUS]; #define GS4(a,b,c,d,e,f,a1,b1,c1,d1,e1,f1,a2,b2,c2,d2,e2,f2,a3,b3,c3,d3,e3,f3){ \ a += b + e; a1+= b1 + e1; a2+= b2 + e2; a3+= b3 + e3; \ d = ROL16( d ^ a); d1 = ROL16(d1 ^ a1); d2 = ROL16(d2 ^ a2); d3 = ROL16(d3 ^ a3); \ c +=d; c1+=d1; c2+=d2; c3+=d3;\ b = ROTR32(b ^ c, 12); b1 = ROTR32(b1^c1, 12); b2 = ROTR32(b2^c2, 12); b3 = ROTR32(b3^c3, 12); \ a += b + f; a1+= b1 + f1; a2+= b2 + f2; a3+= b3 + f3; \ d = ROR8(d ^ a); d1 = ROR8(d1^a1); d2 = ROR8(d2^a2); d3 = ROR8(d3^a3); \ c += d; c1 += d1; c2 += d2; c3 += d3;\ b = ROTR32(b ^ c, 7); b1 = ROTR32(b1^c1, 7); b2 = ROTR32(b2^c2, 7); b3 = ROTR32(b3^c3, 7); \ } __global__ __launch_bounds__(TPB,1) void blake2s_gpu_hash_nonce(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce, const uint32_t ptarget7){ const uint32_t step = gridDim.x * blockDim.x; uint32_t m[ 3]; uint32_t 
v[16]; m[0] = midstate[16]; m[1] = midstate[17]; m[2] = midstate[18]; const uint32_t h7 = midstate[19]; for(uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x ; thread <threads; thread+=step){ #pragma unroll for(int i=0;i<16;i++){ v[ i] = midstate[ i]; } uint32_t nonce = cuda_swab32(startNonce + thread); // Round( 0 ); v[ 1] += nonce; v[13] = ROR8(v[13] ^ v[ 1]); v[ 9] += v[13]; v[ 5] = ROTR32(v[ 5] ^ v[ 9], 7); v[ 1]+= v[ 6]; v[ 0]+= v[ 5]; v[12] = ROL16(v[12] ^ v[ 1]); v[13] = ROL16(v[13] ^ v[ 2]); v[15] = ROL16(v[15] ^ v[ 0]); v[11]+= v[12]; v[ 8]+= v[13]; v[ 9]+= v[14]; v[10]+= v[15]; v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12); v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 1]+= v[ 6]; v[ 2]+= v[ 7]; v[ 3]+= v[ 4]; v[ 0]+= v[ 5]; v[12] = ROR8(v[12] ^ v[ 1]); v[13] = ROR8(v[13] ^ v[ 2]); v[14] = ROR8(v[14] ^ v[ 3]); v[15] = ROR8(v[15] ^ v[ 0]); v[11]+= v[12]; v[ 8]+= v[13]; v[ 9]+= v[14]; v[10]+= v[15]; v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7); v[ 5] = ROTR32(v[ 5] ^ v[10], 7); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 1],0, v[ 1],v[ 6],v[11],v[12],m[ 0],m[ 2], v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],0,nonce); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,m[ 0], v[ 2],v[ 6],v[10],v[14],0,m[ 2], v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],nonce,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 1], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],nonce,m[ 1], v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 2],0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 0], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 0], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 2],0, v[ 3],v[ 
7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 1], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],nonce,0); GS4(v[ 0],v[ 4],v[ 8],v[12],m[ 2],0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 0],0, v[ 3],v[ 7],v[11],v[15],0,nonce); GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 1],0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],m[ 1],0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 0],0, v[ 1],v[ 6],v[11],v[12],0,nonce, v[ 2],v[ 7],v[ 8],v[13],0,m[ 2], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,m[ 1], v[ 3],v[ 7],v[11],v[15],nonce,0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 0], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 2],0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,nonce, v[ 3],v[ 7],v[11],v[15],m[ 0],0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 2], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],m[ 1],0, v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 2], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],m[ 1],0); // GS(9,4,v[ 0],v[ 5],v[10],v[15]); v[ 0] += v[ 5]; v[ 2] += v[ 7] + nonce; v[15] = ROL16(v[15] ^ v[ 0]); v[13] = ROL16(v[13] ^ v[ 2]); v[10] += v[15]; v[ 8] += v[13]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 0] += v[ 5]; v[ 2] += v[ 7]; v[15] = ROR8(v[15] ^ v[ 0]); v[13] = ROR8(v[13] ^ v[ 2]); v[ 8] += v[13]; v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); if (xor3x(h7,v[7],v[15]) <= ptarget7){ uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1; if(pos<maxResults) resNonce[pos]=nonce; // return; } } } __global__ __launch_bounds__(TPB) void blake2s_gpu_hash_nonce(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce){ const uint32_t step = 
gridDim.x * blockDim.x; uint32_t m[ 3]; uint32_t v[16]; m[0] = midstate[16]; m[1] = midstate[17]; m[2] = midstate[18]; const uint32_t h7 = midstate[19]; for(uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x ; thread <threads; thread+=step){ #pragma unroll for(int i=0;i<16;i++){ v[ i] = midstate[ i]; } uint32_t nonce = cuda_swab32(startNonce+thread); // Round( 0 ); v[ 1]+= nonce; v[13] = ROR8(v[13] ^ v[ 1]); v[ 9]+= v[13]; v[ 5] = ROTR32(v[ 5] ^ v[ 9], 7); v[ 1]+= v[ 6]; v[ 0]+= v[ 5]; v[13] = ROL16(v[13] ^ v[ 2]); v[12] = ROL16(v[12] ^ v[ 1]); v[15] = ROL16(v[15] ^ v[ 0]); v[ 8]+= v[13]; v[11]+= v[12]; v[ 9]+= v[14]; v[10]+= v[15]; v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12); v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 2]+= v[ 7]; v[ 1]+= v[ 6]; v[ 3]+= v[ 4]; v[ 0]+= v[ 5]; v[13] = ROR8(v[13] ^ v[ 2]); v[12] = ROR8(v[12] ^ v[ 1]); v[14] = ROR8(v[14] ^ v[ 3]); v[15] = ROR8(v[15] ^ v[ 0]); v[ 8]+= v[13]; v[11]+= v[12]; v[ 9]+= v[14]; v[10]+= v[15]; v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7); v[ 5] = ROTR32(v[ 5] ^ v[10], 7); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 1],0, v[ 1],v[ 6],v[11],v[12],m[ 0],m[ 2], v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],0,nonce); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,m[ 0], v[ 2],v[ 6],v[10],v[14],0,m[ 2], v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],nonce,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 1], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],nonce,m[ 1], v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 2],0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,m[ 0], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 0], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 
2],v[ 6],v[10],v[14],m[ 2],0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 1], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],nonce,0); GS4(v[ 0],v[ 4],v[ 8],v[12],m[ 2],0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],m[ 0],0, v[ 3],v[ 7],v[11],v[15],0,nonce); GS4(v[ 0],v[ 5],v[10],v[15],0,0, v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 1],0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],m[ 1],0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],0,0); GS4(v[ 0],v[ 5],v[10],v[15],m[ 0],0, v[ 1],v[ 6],v[11],v[12],0,nonce, v[ 2],v[ 7],v[ 8],v[13],0,m[ 2], v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,m[ 1], v[ 3],v[ 7],v[11],v[15],nonce,0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 0], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],0,0, v[ 3],v[ 4],v[ 9],v[14],m[ 2],0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,0, v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,nonce, v[ 3],v[ 7],v[11],v[15],m[ 0],0); GS4(v[ 0],v[ 5],v[10],v[15],0,m[ 2], v[ 1],v[ 6],v[11],v[12],0,0, v[ 2],v[ 7],v[ 8],v[13],m[ 1],0, v[ 3],v[ 4],v[ 9],v[14],0,0); GS4(v[ 0],v[ 4],v[ 8],v[12],0,m[ 2], v[ 1],v[ 5],v[ 9],v[13],0,0, v[ 2],v[ 6],v[10],v[14],0,0, v[ 3],v[ 7],v[11],v[15],m[ 1],0); v[ 0] += v[ 5]; v[ 2] += v[ 7] + nonce; v[15] = ROL16(v[15] ^ v[ 0]); v[13] = ROL16(v[13] ^ v[ 2]); v[10] += v[15]; v[ 8] += v[13]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 0] += v[ 5]; v[ 2] += v[ 7]; v[15] = ROTR32(v[15] ^ v[ 0],1); v[13] = ROR8(v[13] ^ v[ 2]); if(xor3x(v[ 7],h7,v[ 8]+v[13])==v[15]){ uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1; if(pos<maxResults) resNonce[pos]=nonce; // return; } } } static void blake2s_setBlock(const uint32_t* input,const uint32_t ptarget7){ uint32_t _ALIGN(64) m[16]; uint32_t _ALIGN(64) v[16]; uint32_t _ALIGN(64) h[21]; // COMPRESS for(int i = 0; i < 16; ++i ) m[i] 
= input[i]; h[ 0] = 0x01010020 ^ blake2s_IV[0]; h[ 1] = blake2s_IV[1]; h[ 2] = blake2s_IV[2]; h[ 3] = blake2s_IV[3]; h[ 4] = blake2s_IV[4]; h[ 5] = blake2s_IV[5]; h[ 6] = blake2s_IV[6]; h[ 7] = blake2s_IV[7]; for(int i = 0; i < 8; ++i ) v[i] = h[i]; v[ 8] = blake2s_IV[0]; v[ 9] = blake2s_IV[1]; v[10] = blake2s_IV[2]; v[11] = blake2s_IV[3]; v[12] = 64 ^ blake2s_IV[4]; v[13] = blake2s_IV[5]; v[14] = blake2s_IV[6]; v[15] = blake2s_IV[7]; ROUND( 0 ); ROUND( 1 ); ROUND( 2 ); ROUND( 3 ); ROUND( 4 ); ROUND( 5 ); ROUND( 6 ); ROUND( 7 ); ROUND( 8 ); ROUND( 9 ); for(int i = 0; i < 8; ++i ) h[i] ^= v[i] ^ v[i + 8]; h[16] = input[16]; h[17] = input[17]; h[18] = input[18]; h[ 8] = 0x6A09E667; h[ 9] = 0xBB67AE85; h[10] = 0x3C6EF372; h[11] = 0xA54FF53A; h[12] = 0x510E522F; h[13] = 0x9B05688C; h[14] = ~0x1F83D9AB; h[15] = 0x5BE0CD19; h[ 0]+= h[ 4] + h[16]; h[12] = SPH_ROTR32(h[12] ^ h[ 0],16); h[ 8]+= h[12]; h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8],12); h[ 0]+= h[ 4] + h[17]; h[12] = SPH_ROTR32(h[12] ^ h[ 0],8); h[ 8]+= h[12]; h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8],7); h[ 1]+= h[ 5] + h[18]; h[13] = SPH_ROTR32(h[13] ^ h[ 1], 16); h[ 9]+= h[13]; h[ 5] = ROTR32(h[ 5] ^ h[ 9], 12); h[ 2]+= h[ 6]; h[14] = SPH_ROTR32(h[14] ^ h[ 2],16); h[10]+= h[14]; h[ 6] = SPH_ROTR32(h[ 6] ^ h[10], 12); h[ 2]+= h[ 6]; h[14] = SPH_ROTR32(h[14] ^ h[ 2],8); h[10]+= h[14]; h[ 6] = SPH_ROTR32(h[ 6] ^ h[10], 7); h[19] = h[7]; //constant h[7] for nonce check h[ 3]+= h[ 7]; h[15] = SPH_ROTR32(h[15] ^ h[ 3],16); h[11]+= h[15]; h[ 7] = SPH_ROTR32(h[ 7] ^ h[11], 12); h[ 3]+= h[ 7]; h[15] = SPH_ROTR32(h[15] ^ h[ 3],8); h[11]+= h[15]; h[ 7] = SPH_ROTR32(h[ 7] ^ h[11], 7); h[ 1]+= h[ 5]; h[ 3]+= h[ 4]; h[14] = SPH_ROTR32(h[14] ^ h[ 3],16); h[ 2]+= h[ 7]; if(ptarget7==0){ h[19] = SPH_ROTL32(h[19],7); //align the rotation with v[7] v[15]; } cudaMemcpyToSymbol(midstate, h, 20*sizeof(uint32_t), 0, cudaMemcpyHostToDevice); } static bool init[MAX_GPUS] = { 0 }; extern "C" int scanhash_blake2s(int thr_id, struct work *work, uint32_t 
max_nonce, unsigned long *hashes_done){ uint32_t _ALIGN(64) endiandata[20]; uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; const int dev_id = device_map[thr_id]; int intensity = 31; uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity); if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB)); const dim3 block(TPB); if (!init[thr_id]) { cudaSetDevice(dev_id); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } gpulog(LOG_INFO,thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t))); h_resNonce[thr_id] = (uint32_t*) malloc(maxResults * sizeof(uint32_t)); if(h_resNonce[thr_id] == NULL){ gpulog(LOG_ERR,thr_id,"Host memory allocation failed"); exit(EXIT_FAILURE); } CUDA_LOG_ERROR(); init[thr_id] = true; } for (int i=0; i < 19; i++) { be32enc(&endiandata[i], pdata[i]); } blake2s_setBlock(endiandata,ptarget[7]); int rc=0; cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t)); do { if(ptarget[7]){ blake2s_gpu_hash_nonce<<<grid, block>>>(throughput,pdata[19],d_resNonce[thr_id],ptarget[7]); }else{ blake2s_gpu_hash_nonce<<<grid, block>>>(throughput,pdata[19],d_resNonce[thr_id]); } cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost); if (h_resNonce[thr_id][0] != 0){ cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t)); if(h_resNonce[thr_id][0]>(maxResults-1)){ gpulog(LOG_WARNING,dev_id,"Candidate flood: %u",h_resNonce[thr_id][0]); h_resNonce[thr_id][0]=maxResults-1; } uint32_t i; 
for(i=1;i<h_resNonce[thr_id][0]+1;i++){ uint32_t vhashcpu[8]; uint32_t nonce = sph_bswap32(h_resNonce[thr_id][i]); be32enc(&endiandata[19],nonce); blake2s_hash(vhashcpu, endiandata); if(vhashcpu[ 6]<=ptarget[6] && fulltest(vhashcpu, ptarget)){ work_set_target_ratio(work, vhashcpu); *hashes_done = pdata[19] - first_nonce + throughput; pdata[19] = work->nonces[0] = nonce; rc=1; //search for 2nd nonce for(uint32_t j=i+1;j<h_resNonce[thr_id][0]+1;j++){ nonce = sph_bswap32(h_resNonce[thr_id][j]); be32enc(&endiandata[19],nonce); blake2s_hash(vhashcpu, endiandata); if(vhashcpu[ 6]<=ptarget[6] && fulltest(vhashcpu, ptarget)){ work->nonces[1] = nonce; // if(!opt_quiet) // gpulog(LOG_BLUE,dev_id,"Found 2nd nonce: %u/%08X - %u/%08X",i,pdata[19],j,work->nonces[1]); if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) { work_set_target_ratio(work, vhashcpu); xchg(work->nonces[1], pdata[19]); } rc=2; break; } } return rc; }/*else{ applog_hash(ptarget); applog_compare_hash(vhashcpu, ptarget); gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", h_resNonce[thr_id][0]); }*/ } } pdata[19]+=throughput; } while (!work_restart[thr_id].restart && (uint64_t)max_nonce > (uint64_t)throughput + (uint64_t)pdata[19]); *hashes_done = pdata[19] - first_nonce; // MyStreamSynchronize(NULL, 0, device_map[thr_id]); return rc; } // cleanup extern "C" void free_blake2s(int thr_id) { if (!init[thr_id]) return; cudaDeviceSynchronize(); free(h_resNonce[thr_id]); cudaFree(d_resNonce[thr_id]); init[thr_id] = false; cudaDeviceSynchronize(); }
the_stack
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <THC/THC.h>

#include <iostream>
#include <cstdint>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdio.h>

// Threads per block for every kernel in this file.
#define THREADS 128

// Flat input-image index of the pixel at offset (X, Y) inside the 2x2 input
// patch belonging to output pixel (px, py). Input is row-major, width I_width.
#define idx(X, Y) ((px * 2 + (X)) + (py * 2 + (Y)) * (I_width))

// Launch grid is (ceil(pixels/THREADS), numChannels, numImages); the y/z
// block coordinates select which channel / image this block processes.
#define channel blockIdx.y
#define image blockIdx.z

#include "common.h"

#if 0 // 1 == fast math, 0 == normal math
#define nn_exp __expf
#define nn_pow __powf
#define nn_log __logf
#else
// NOTE(review): these are the double-precision overloads; float arguments are
// promoted to double. Kept as-is to preserve numerics.
#define nn_exp exp
#define nn_pow pow
#define nn_log log
#endif

//-------------------------------------------------------------------
// Component-wise accumulation for float2 (used for the weighted-sum pairs).
__device__ __forceinline__ void operator+=(float2& out, const float2 in)
{
    out.x += in.x;
    out.y += in.y;
}

//-------------------------------------------------------------------
// One pixel's contribution to the normalizer: (I_q - It_p)^2 + epsilon.
__device__ __forceinline__ float calcVMAX(const float I_q, const float It_p, const float epsilon)
{
    float x = I_q - It_p;
    return x * x + epsilon;
}

//-------------------------------------------------------------------
// One pixel's contribution to the bilateral pooling sum:
//   weight = (((I_q - It_p)^2 + epsilon) / vmax)^lambda_e + alpha_e
// Returns (I_q * weight, weight) so numerator and denominator accumulate together.
__device__ __forceinline__ float2 calcSUM(const float I_q, const float It_p, const float epsilon,
                                          const float alpha_e, const float lambda_e, const float vmax)
{
    float x      = I_q - It_p;
    float a      = (x * x + epsilon) / vmax;
    float b      = nn_pow(a, lambda_e);
    float weight = b + alpha_e;
    return make_float2(I_q * weight, weight);
}

//-------------------------------------------------------------------
// Forward pass. One thread per output pixel; each output pixel pools a 2x2
// input patch (plus a one-pixel border when `overlap` is true). NCHW layout.
template<bool overlap>
__global__ void GPU_FORWARD(
    const float* __restrict__ I,
    const float* __restrict__ It, // I~ (template image), one value per output pixel
    float* __restrict__ O,
    const float* const __restrict__ lambda, // per-channel, stored as log(2*lambda)
    const float* __restrict__ alpha,        // per-channel, stored as log(alpha)
    const float epsilon,
    const int I_width,
    const int I_height,
    const int numChannels
)
{
    const uint32_t O_width  = I_width / 2;
    const uint32_t O_height = I_height / 2;

    const int tid = blockIdx.x * THREADS + threadIdx.x;
    const int px  = tid % O_width;
    const int py  = tid / O_width;

    // bail out if this thread falls outside the output image
    if(py >= O_height)
        return;

    // move pointers to the correct image/channel (and output pixel for It and O)
    I  += (image * numChannels + channel) * I_width * I_height;
    It += (image * numChannels + channel) * O_width * O_height + py * O_width + px;
    O  += (image * numChannels + channel) * O_width * O_height + py * O_width + px;

    // per-channel parameters (stored in log space)
    const float alpha_e  = nn_exp(alpha[channel]);
    const float lambda_e = nn_exp(lambda[channel]) / 2.0f;

    // Stage 1: normalizer VMAX over the patch
    float vmax;
    vmax  = calcVMAX(I[idx(0, 0)], *It, epsilon);
    vmax += calcVMAX(I[idx(0, 1)], *It, epsilon);
    vmax += calcVMAX(I[idx(1, 0)], *It, epsilon);
    vmax += calcVMAX(I[idx(1, 1)], *It, epsilon);
    if(overlap)
    {
        // extend the patch one pixel to the left/top where it exists
        if(px != 0)
        {
            vmax += calcVMAX(I[idx(-1, 0)], *It, epsilon);
            vmax += calcVMAX(I[idx(-1, 1)], *It, epsilon);
        }
        if(py != 0)
        {
            vmax += calcVMAX(I[idx(0, -1)], *It, epsilon);
            vmax += calcVMAX(I[idx(1, -1)], *It, epsilon);
        }
        if(px != 0 && py != 0)
        {
            vmax += calcVMAX(I[idx(-1, -1)], *It, epsilon);
        }
    }

    // Stage 2: weighted sum -> O(p) = sum(I*w) / sum(w)
    float2 sums;
    sums  = calcSUM(I[idx(0, 0)], *It, epsilon, alpha_e, lambda_e, vmax);
    sums += calcSUM(I[idx(0, 1)], *It, epsilon, alpha_e, lambda_e, vmax);
    sums += calcSUM(I[idx(1, 0)], *It, epsilon, alpha_e, lambda_e, vmax);
    sums += calcSUM(I[idx(1, 1)], *It, epsilon, alpha_e, lambda_e, vmax);
    if(overlap)
    {
        if(px != 0)
        {
            sums += calcSUM(I[idx(-1, 0)], *It, epsilon, alpha_e, lambda_e, vmax);
            sums += calcSUM(I[idx(-1, 1)], *It, epsilon, alpha_e, lambda_e, vmax);
        }
        if(py != 0)
        {
            sums += calcSUM(I[idx(0, -1)], *It, epsilon, alpha_e, lambda_e, vmax);
            sums += calcSUM(I[idx(1, -1)], *It, epsilon, alpha_e, lambda_e, vmax);
        }
        if(px != 0 && py != 0)
        {
            sums += calcSUM(I[idx(-1, -1)], *It, epsilon, alpha_e, lambda_e, vmax);
        }
    }

    // store result
    *O = sums.x / sums.y;
}

//-------------------------------------------------------------------
// Block-wide sum of `total` followed by a single atomicAdd into *out.
// MUST be called by all THREADS threads of the block (contains __syncthreads);
// threads with nothing to contribute pass 0.
// NOTE(review): uses the legacy mask-less __shfl_down (pre-CUDA-9 semantics);
// on Volta+ toolchains this should become __shfl_down_sync(0xffffffff, ...).
__device__ void reduce(float* out, float total)
{
    const uint32_t numWarps = THREADS / 32;
    __shared__ float sum[numWarps];

    // intra-warp tree reduction
    total += __shfl_down(total, 16);
    total += __shfl_down(total, 8);
    total += __shfl_down(total, 4);
    total += __shfl_down(total, 2);
    total += __shfl_down(total, 1);
    if(threadIdx.x % 32 == 0)
        sum[threadIdx.x / 32] = total;

    __syncthreads();

    // first numWarps lanes reduce the per-warp partials (numWarps == 4)
    if(threadIdx.x < numWarps)
    {
        total = sum[threadIdx.x];
        total += __shfl_down(total, 2);
        total += __shfl_down(total, 1);
        if(threadIdx.x == 0)
            atomicAdd(out, total);
    }
}

//-------------------------------------------------------------------
// Abort on any CUDA runtime error.
void check(cudaError err)
{
    if(err != cudaSuccess)
    {
        std::cerr << "CUDA_ERROR: " << (int)err << " " << cudaGetErrorName(err)
                  << ": " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
}

//-------------------------------------------------------------------
// Launch config shared by all kernels: one thread per output pixel in x,
// channels in y, images in z.
void calcLaunchConfig(const uint32_t I_width, const uint32_t I_height,
                      const uint32_t numImages, const uint32_t numChannels,
                      dim3& threads, dim3& blocks)
{
    const uint32_t O_width   = I_width / 2;
    const uint32_t O_height  = I_height / 2;
    const uint32_t numPixels = O_width * O_height;
    threads = dim3(THREADS, 1, 1);
    blocks  = dim3((uint32_t)std::ceil(numPixels / (float)THREADS), numChannels, numImages);
}

//-----------------------------------------------------------------------------------------------------------------------
// Forward entry point (Torch/THC binding). Resizes `output` (and `It`) to the
// pooled size and runs GPU_FORWARD<false>.
extern "C" void SpatialInverseBilateralPooling_updateOutput(THCState* state, THCudaTensor* I,
                                                            THCudaTensor* It, THCudaTensor* lambda,
                                                            THCudaTensor* alpha, THCudaTensor* output,
                                                            int kW, int kH, int dW, int dH)
{
    //VISINF_assertSameGPU(state, 5, I,It,lambda,alpha, output);
    long nInputCols, nInputRows, nInputPlane, batchSize;

    if (I->nDimension == 3) {
        nInputCols  = I->size[2];
        nInputRows  = I->size[1];
        nInputPlane = I->size[0];
        batchSize   = 1;
    } else if (I->nDimension == 2) {
        nInputCols  = 1;
        nInputRows  = 1;
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    } else {
        nInputCols  = I->size[3];
        nInputRows  = I->size[2];
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    }

    long nOutputCols = ceil(float(nInputCols - kW) / float(dW)) + 1;
    long nOutputRows = ceil(float(nInputRows - kH) / float(dH)) + 1;

    I      = THCudaTensor_newContiguous(state, I);
    It     = THCudaTensor_newContiguous(state, It);
    lambda = THCudaTensor_newContiguous(state, lambda);
    alpha  = THCudaTensor_newContiguous(state, alpha);

    float* I_data      = THCudaTensor_data(state, I);
    float* lambda_data = THCudaTensor_data(state, lambda);
    float* alpha_data  = THCudaTensor_data(state, alpha);

    THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
    THCudaTensor_resizeAs(state, It, output);
    // BUGFIX: fetch It's data pointer only AFTER resizeAs — the resize may
    // reallocate the tensor, which previously left a stale pointer here.
    float* It_data     = THCudaTensor_data(state, It);
    float* output_data = THCudaTensor_data(state, output);

    float epsilon = 0.001f;

    // get launch config
    dim3 threads, blocks;
    calcLaunchConfig(nInputCols, nInputRows, batchSize, nInputPlane, threads, blocks);

    // zero the output before the kernel runs
    // NOTE(review): sized with (cols/2)*(rows/2); matches nOutput{Cols,Rows}
    // only for the 2x2/stride-2 case the kernels implement — confirm callers.
    check(cudaMemset(output_data, 0,
                     sizeof(float) * batchSize * nInputPlane * (nInputCols / 2) * (nInputRows / 2)));

    GPU_FORWARD<false><<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
        I_data, It_data, output_data, lambda_data, alpha_data,
        epsilon, nInputCols, nInputRows, nInputPlane);

    // wait for kernels
    //check(cudaDeviceSynchronize());
    //check(cudaStreamSynchronize(THCState_getCurrentStream(state)));
    THCudaCheck(cudaGetLastError());

    if(I->nDimension == 3)
        THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);

    THCudaTensor_free(state, I);
    THCudaTensor_free(state, It);
    THCudaTensor_free(state, lambda);
    THCudaTensor_free(state, alpha);
}

//-----------------------------------------------------------------------------------------------------------------------------------------
// Backward pass w.r.t. the input image I and the template Itilde.
// One thread per output pixel; writes the four input-pixel gradients of its
// 2x2 patch and one Itilde gradient. No overlap handling (V1).
__global__ void GPU_BACKWARD_I_V1(
    const float* __restrict__ gradOutput,
    const float* __restrict__ I,
    float* __restrict__ I_gradient,
    const float* __restrict__ Itilde,
    float* __restrict__ Itilde_gradient,
    const float* __restrict__ lambda,
    const float* __restrict__ alpha,
    const float epsilon,
    const uint32_t I_width,
    const uint32_t I_height,
    const uint32_t numChannels
)
{
    const uint32_t O_width  = I_width / 2;
    const uint32_t O_height = I_height / 2;

    const int tid = blockIdx.x * THREADS + threadIdx.x;
    const int px  = tid % O_width;
    const int py  = tid / O_width;

    // bail out if this thread falls outside the output image
    if(py >= O_height)
        return;

    // move pointers to the correct image/channel (and pixel for the per-output buffers)
    I               += (image * numChannels + channel) * I_width * I_height;
    I_gradient      += (image * numChannels + channel) * I_width * I_height;
    Itilde          += (image * numChannels + channel) * O_width * O_height + py * O_width + px;
    gradOutput      += (image * numChannels + channel) * O_width * O_height + py * O_width + px;
    Itilde_gradient += (image * numChannels + channel) * O_width * O_height + py * O_width + px;

    // constants
    const float It   = *Itilde;
    const float gOut = *gradOutput;
    const float ea   = nn_exp(alpha[channel]);
    const float el   = nn_exp(lambda[channel]) / 2.0f;

    // First sweep: accumulate the normalizer A = sum((In-It)^2) + n*epsilon
    // and C = sum(2In - 2It) over the 2x2 patch (n = patch size).
    float n = 0.0f;
    float A = 0.0f;
    float C = 0.0f;
    #pragma unroll
    for(int dy = 0; dy <= 1; ++dy)
    {
        #pragma unroll
        for(int dx = 0; dx <= 1; ++dx)
        {
            float In = I[idx(dx, dy)];
            float x  = In - It;            // (In - It)
            float x2 = x * x;              // (In - It)^2
            float v  = 2.0f * In - 2.0f * It; // d/dIt of -(In-It)^2, times -1
            n++;
            A += x2;
            C += v;
        }
    }
    A += n * epsilon;

    // Second sweep: per-pixel weights and their partial derivatives.
    float B       = 0.0f; // sum of weights (without the +ea, added after the loop)
    float D       = 0.0f; // sum of In * weight
    float tilde_0 = 0.0f; // d(sum w)/dIt accumulator
    float tilde_1 = 0.0f; // d(sum In*w)/dIt accumulator
    float image_0[2][2] = {0}; // direct term of dO/dIn
    float image_1[2][2] = {0}; // cross term via A in the numerator
    float image_2[2][2] = {0}; // direct term of d(sum w)/dIn
    float image_3[2][2] = {0}; // cross term via A in the denominator
    #pragma unroll
    for(int dy = 0; dy <= 1; ++dy)
    {
        #pragma unroll
        for(int dx = 0; dx <= 1; ++dx)
        {
            float In = I[idx(dx, dy)];
            float x  = In - It;                  // (In - It)
            float x2 = x * x;                    // (In - It)^2
            float y  = x2 + epsilon;             // (In - It)^2 + epsilon
            float yA = y / A;                    // ((In - It)^2 + epsilon) / A
            float z  = nn_pow(yA, el);           // yA^(e^lambda/2)
            float z1 = nn_pow(yA, el - 1.0f);    // yA^(e^lambda/2 - 1)
            float v  = 2.0f * In - 2.0f * It;    // 2In - 2It
            float w  = v / A - (y * C) / (A * A); // d(yA)/dIt
            float ww = v / A - (y * v) / (A * A); // d(yA)/dIn

            // general sums
            B += z;
            D += In * (ea + z);

            // Itilde gradient pieces
            float tt = el * w * z1;
            tilde_0 += tt;
            tilde_1 += In * tt;

            // image gradient pieces
            image_0[dy][dx] = ea + z + (In * el * ww * z1);
            image_2[dy][dx] = -(el * ww * z1);

            // cross terms: this pixel's In perturbs A, which perturbs the
            // weight of every OTHER pixel in the patch
            float it1 = (In * el * y * z1) / (A * A);
            float it3 = (el * y * z1) / (A * A);
            #pragma unroll
            for(int iy = 0; iy <= 1; ++iy)
            {
                #pragma unroll
                for(int ix = 0; ix <= 1; ++ix)
                {
                    if(ix == dx && iy == dy)
                        continue;
                    image_1[iy][ix] += 2.0f * (I[idx(ix, iy)] - It) * it1;
                    image_3[iy][ix] += 2.0f * (I[idx(ix, iy)] - It) * it3;
                }
            }
        }
    }
    B += n * ea;

    // final image gradient: quotient rule on O = D/B per input pixel
    #pragma unroll
    for(int dy = 0; dy <= 1; ++dy)
    {
        #pragma unroll
        for(int dx = 0; dx <= 1; ++dx)
        {
            I_gradient[idx(dx, dy)] = gOut * ((image_0[dy][dx] - image_1[dy][dx]) / B
                                            + ((image_2[dy][dx] + image_3[dy][dx]) * D) / (B * B));
        }
    }

    // final Itilde gradient
    *Itilde_gradient = gOut * (D * tilde_0 / (B * B) - tilde_1 / B);
}

// Backward entry point for input gradients (Torch/THC binding).
extern "C" void SpatialInverseBilateralPooling_updateGradInput(THCState* state, THCudaTensor* I,
                                                               THCudaTensor* It, THCudaTensor* lambda,
                                                               THCudaTensor* alpha, THCudaTensor* gradI,
                                                               THCudaTensor* gradIt, THCudaTensor* gradOutput,
                                                               int kW, int kH, int dW, int dH)
{
    //VISINF_assertSameGPU(state, 7, I,It,lambda,alpha,gradI,gradIt, gradOutput);
    long nInputCols, nInputRows, nInputPlane, batchSize;

    if (I->nDimension == 3) {
        nInputCols  = I->size[2];
        nInputRows  = I->size[1];
        nInputPlane = I->size[0];
        batchSize   = 1;
    } else if (I->nDimension == 2) {
        nInputCols  = 1;
        nInputRows  = 1;
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    } else {
        nInputCols  = I->size[3];
        nInputRows  = I->size[2];
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    }

    float epsilon = 0.001f;

    gradOutput = THCudaTensor_newContiguous(state, gradOutput);
    // NOTE(review): unlike updateOutput, I/It/lambda/alpha are used without a
    // newContiguous here — the kernel assumes they are already contiguous.
    THCudaTensor_resizeAs(state, gradI, I);
    THCudaTensor_resizeAs(state, gradIt, It);

    // get launch config
    dim3 threads, blocks;
    calcLaunchConfig(nInputCols, nInputRows, batchSize, nInputPlane, threads, blocks);

    // VARIANT 1: without overlap
    GPU_BACKWARD_I_V1<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
        THCudaTensor_data(state, gradOutput),
        THCudaTensor_data(state, I),
        THCudaTensor_data(state, gradI),
        THCudaTensor_data(state, It),
        THCudaTensor_data(state, gradIt),
        THCudaTensor_data(state, lambda),
        THCudaTensor_data(state, alpha),
        epsilon, nInputCols, nInputRows, nInputPlane);
    // VARIANT 2: with overlap
    //GPU_BACKWARD_I_V2<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(I, gradI, It, gradIt, lambda, alpha, epsilon, nInputCols, nInputRows, nInputPlane);

    // wait for kernels
    //check(cudaDeviceSynchronize());
    THCudaCheck(cudaGetLastError());

    THCudaTensor_free(state, gradOutput);
}

//-------------------------------------------------------------------
// Backward pass w.r.t. the per-channel parameters lambda and alpha.
// Every thread (including out-of-range ones) must reach the reduce() calls
// because they contain __syncthreads(); out-of-range threads therefore fall
// through with zero contributions instead of returning early.
__global__ void GPU_BACKWARD_LA_V1(
    const float* __restrict__ gradOutput,
    const float* __restrict__ I,
    const float* __restrict__ Itilde,
    const float* __restrict__ lambda,
    float* __restrict__ lambda_gradient,
    const float* __restrict__ alpha,
    float* __restrict__ alpha_gradient,
    const float epsilon,
    const uint32_t I_width,
    const uint32_t I_height,
    const uint32_t numChannels
)
{
    const uint32_t O_width  = I_width / 2;
    const uint32_t O_height = I_height / 2;

    const int tid = blockIdx.x * THREADS + threadIdx.x;
    const int px  = tid % O_width;
    const int py  = tid / O_width;

    // move pointers to the correct image/channel (no dereference happens for
    // out-of-range threads, so this is safe before the bounds check)
    I          += (image * numChannels + channel) * I_width * I_height;
    Itilde     += (image * numChannels + channel) * O_width * O_height + py * O_width + px;
    gradOutput += (image * numChannels + channel) * O_width * O_height + py * O_width + px;
    alpha_gradient  += channel;
    lambda_gradient += channel;

    float n        = 0.0f;
    float A        = 0.0f;
    float alpha_0  = 0.0f;
    float B        = 0.0f;
    float Dea      = 0.0f;
    float D        = 0.0f;
    float lambda_0 = 0.0f;
    float lambda_1 = 0.0f;

    // only in-range threads accumulate; others keep everything at zero
    if(py < O_height)
    {
        // constants
        const float It   = *Itilde;
        const float gOut = *gradOutput;
        const float ea   = nn_exp(alpha[channel]);
        const float el   = nn_exp(lambda[channel]) / 2.0f;

        // First sweep: normalizer A and the alpha numerator sum(In).
        #pragma unroll
        for(int dy = 0; dy <= 1; ++dy)
        {
            #pragma unroll
            for(int dx = 0; dx <= 1; ++dx)
            {
                float In = I[idx(dx, dy)];
                float x  = In - It;  // (In - It)
                n++;
                A += x * x;          // (In - It)^2
                alpha_0 += In;
            }
        }
        alpha_0 *= ea;
        A += n * epsilon;

        // Second sweep: weights and their lambda-derivatives.
        #pragma unroll
        for(int dy = 0; dy <= 1; ++dy)
        {
            #pragma unroll
            for(int dx = 0; dx <= 1; ++dx)
            {
                float In = I[idx(dx, dy)];
                float x  = In - It;               // (In - It)
                float x2 = x * x;                 // (In - It)^2
                float y  = x2 + epsilon;          // (In - It)^2 + epsilon
                float yA = y / A;                 // ((In - It)^2 + epsilon) / A
                float z  = nn_pow(yA, el);        // yA^(e^lambda/2)
                float k  = nn_log(yA);            // log(yA); d(yA^el)/dlambda = k*el*z

                B += z;
                D += In * (ea + z);

                lambda_0 += In * k * el * z;
                lambda_1 += k * el * z;
            }
        }
        B += n * ea;

        Dea = D * n * ea * gOut;
        alpha_0  *= gOut;
        lambda_0 *= gOut;
        lambda_1 *= gOut;
    }

    // prevent division by 0 (also covers the out-of-range threads, B == 0)
    float final_alpha, final_lambda;
    if(B == 0.0f)
    {
        final_alpha  = 0.0f;
        final_lambda = 0.0f;
    }
    else
    {
        final_alpha  = alpha_0 / B - Dea / (B * B);
        final_lambda = lambda_0 / B - (D * lambda_1 / (B * B));
    }

    // block-wide reduction + one atomic per block, for each parameter
    reduce(alpha_gradient, final_alpha);
    reduce(lambda_gradient, final_lambda);
}

//-------------------------------------------------------------------
// Backward entry point for parameter gradients (Torch/THC binding).
// Zeroes the gradient buffers, then accumulates into them via GPU_BACKWARD_LA_V1.
extern "C" void SpatialInverseBilateralPooling_accGradParameters(THCState* state, THCudaTensor* I,
                                                                 THCudaTensor* It, THCudaTensor* lambda,
                                                                 THCudaTensor* alpha,
                                                                 THCudaTensor* lambda_gradient,
                                                                 THCudaTensor* alpha_gradient,
                                                                 THCudaTensor* gradOutput,
                                                                 int kW, int kH, int dW, int dH)
{
    //VISINF_assertSameGPU(state, 7, I,It,lambda,alpha,gradLambda,gradAlpha, gradOutput);
    long nInputCols, nInputRows, nInputPlane, batchSize;

    if (I->nDimension == 3) {
        nInputCols  = I->size[2];
        nInputRows  = I->size[1];
        nInputPlane = I->size[0];
        batchSize   = 1;
    } else if (I->nDimension == 2) {
        nInputCols  = 1;
        nInputRows  = 1;
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    } else {
        nInputCols  = I->size[3];
        nInputRows  = I->size[2];
        nInputPlane = I->size[1];
        batchSize   = I->size[0];
    }

    float epsilon = 0.001f;

    gradOutput = THCudaTensor_newContiguous(state, gradOutput);
    THCudaTensor_resizeAs(state, lambda_gradient, lambda);
    THCudaTensor_resizeAs(state, alpha_gradient, alpha);

    // get launch config
    dim3 threads, blocks;
    calcLaunchConfig(nInputCols, nInputRows, batchSize, nInputPlane, threads, blocks);

    // the kernel accumulates with atomicAdd, so start from zero
    check(cudaMemset(THCudaTensor_data(state, alpha_gradient), 0, sizeof(float) * nInputPlane));
    check(cudaMemset(THCudaTensor_data(state, lambda_gradient), 0, sizeof(float) * nInputPlane));

    // VARIANT 1: without overlap
    GPU_BACKWARD_LA_V1<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
        THCudaTensor_data(state, gradOutput),
        THCudaTensor_data(state, I),
        THCudaTensor_data(state, It),
        THCudaTensor_data(state, lambda),
        THCudaTensor_data(state, lambda_gradient),
        THCudaTensor_data(state, alpha),
        THCudaTensor_data(state, alpha_gradient),
        epsilon, nInputCols, nInputRows, nInputPlane);
    // VARIANT 2: with overlap
    //GPU_BACKWARD_LA_V2<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(I, It, lambda, lambda_gradient, alpha, alpha_gradient, epsilon, I_width, I_height, numChannels);

    // wait for kernels
    //check(cudaDeviceSynchronize());
    THCudaCheck(cudaGetLastError());

    THCudaTensor_free(state, gradOutput);
}
the_stack
// Adopted from caffe2 depthwise conv at // pytorch/caffe2/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu namespace caffe2 { struct DepthwiseArgs { // Input layer dimensions int batch{0}; int in_rows{0}; int in_cols{0}; int in_length{0}; int in_depth{0}; // filter size int filter_rows{0}; int filter_cols{0}; int filter_length{0}; // strides and pads int stride{0}; int temporal_stride{0}; int pad_rows{0}; int pad_cols{0}; int pad_length{0}; // Output layer dimensions int out_rows{0}; int out_cols{0}; int out_length{0}; int out_depth{0}; }; template <typename T> __global__ void DepthwiseConv3dGPUKernelNCHW( const DepthwiseArgs args, const T* input, const T* filter, T* output, int num_outputs) { const int in_rows = args.in_rows; const int in_cols = args.in_cols; const int in_length = args.in_length; const int in_depth = args.in_depth; const int filter_rows = args.filter_rows; const int filter_cols = args.filter_cols; const int filter_length = args.filter_length; const int stride = args.stride; const int temporal_stride = args.temporal_stride; const int pad_rows = args.pad_rows; const int pad_cols = args.pad_cols; const int pad_length = args.pad_length; const int out_rows = args.out_rows; const int out_cols = args.out_cols; const int out_length = args.out_length; const int out_depth = args.out_depth; CUDA_1D_KERNEL_LOOP(thread_id, num_outputs) { const int OW = thread_id % out_cols; const int OH = (thread_id / out_cols) % out_rows; const int OL = (thread_id / out_cols / out_rows) % out_length; const int OC = (thread_id / out_cols / out_rows / out_length) % out_depth; const int OB = thread_id / out_cols / out_rows / out_length / out_depth; const int in_d = OC; const int input_offset_temp = (OB * in_depth + OC) * (in_length * in_rows * in_cols); const int input_row_start = OH * stride - pad_rows; const int input_col_start = OW * stride - pad_cols; const int input_length_start = OL * temporal_stride - pad_length; const int input_row_end = input_row_start + filter_rows; 
const int input_col_end = input_col_start + filter_cols; const int input_length_end = input_length_start + filter_length; const float* filter_start = filter + in_d * filter_rows * filter_cols * filter_length; T sum = 0; if (input_row_start >= 0 && input_col_start >= 0 && input_length_start >= 0 && input_row_end < in_rows && input_col_end < in_cols && input_length_end < in_length) { // Loop that doesn't need to check for boundary conditions. #pragma unroll for (int f_l = 0; f_l < filter_length; ++f_l) { const int in_l = input_length_start + f_l; #pragma unroll for (int f_r = 0; f_r < filter_rows; ++f_r) { const int in_r = input_row_start + f_r; const float* filter_offset = filter_start + filter_cols * filter_rows * f_l + filter_cols * f_r; #pragma unroll for (int f_c = 0; f_c < filter_cols; ++f_c) { const int in_c = input_col_start + f_c; const int input_offset = (input_offset_temp) + (in_l * in_cols * in_rows) + (in_r * in_cols) + in_c; #if __CUDA_ARCH__ >= 350 sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c); #else sum += input[input_offset] * filter_offset[f_c]; #endif } } } } else { // Loop that needs to check for boundary conditions. 
// ---- tail of DepthwiseConv3dGPUKernelNCHW (boundary-checked path) ----
// Accumulate over the filter window; each input coordinate is bounds-checked
// because this branch handles output positions whose receptive field crosses
// the padded border.
#pragma unroll
for (int f_l = 0; f_l < filter_length; ++f_l) {
  const int in_l = input_length_start + f_l;
#pragma unroll
  for (int f_r = 0; f_r < filter_rows; ++f_r) {
    const int in_r = input_row_start + f_r;
    // Filter is laid out [depth][length][row][col]; this points at the
    // start of row f_r in temporal slice f_l.
    const float* filter_offset =
        filter_start + filter_cols * filter_rows * f_l + filter_cols * f_r;
#pragma unroll
    for (int f_c = 0; f_c < filter_cols; ++f_c) {
      const int in_c = input_col_start + f_c;
      if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols &&
          in_l >= 0 && in_l < in_length) {
        const int input_offset = (input_offset_temp) +
            (in_l * in_cols * in_rows) + (in_r * in_cols) + in_c;
#if __CUDA_ARCH__ >= 350
        // __ldg routes the read-only loads through the texture cache (SM35+).
        sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c);
#else
        sum += input[input_offset] * filter_offset[f_c];
#endif
      }
    }
  }
}
}
output[thread_id] = sum;
}
}

// A Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
// One thread per element of out_backprop (flat index via CUDA_1D_KERNEL_LOOP);
// each thread scatters its contribution into filter_backprop with atomicAdd,
// so filter_backprop must be zero-initialized by the caller.
template <typename T>
__global__ void DepthwiseConv3dBackpropFilterGPUKernelNCHW(
    const DepthwiseArgs args, const T* out_backprop, const T* input,
    T* filter_backprop, int num_out_backprop) {
  // Unpack geometry once into registers.
  const int in_rows = args.in_rows;
  const int in_cols = args.in_cols;
  const int in_length = args.in_length;
  const int in_depth = args.in_depth;
  const int filter_rows = args.filter_rows;
  const int filter_cols = args.filter_cols;
  const int filter_length = args.filter_length;
  const int stride = args.stride;
  const int temporal_stride = args.temporal_stride;
  const int pad_rows = args.pad_rows;
  const int pad_cols = args.pad_cols;
  const int pad_length = args.pad_length;
  const int out_rows = args.out_rows;
  const int out_cols = args.out_cols;
  const int out_length = args.out_length;
  const int out_depth = args.out_depth;

  CUDA_1D_KERNEL_LOOP(thread_id, num_out_backprop) {
    // Compute the indexes of this thread in the output.
    // NCHW-style decomposition: thread_id = ((((OB*D + OC)*L + OL)*H + OH)*W + OW.
    const int OW = thread_id % out_cols;
    const int OH = (thread_id / out_cols) % out_rows;
    const int OL = (thread_id / out_cols / out_rows) % out_length;
    const int OC = (thread_id / out_cols / out_rows / out_length) % out_depth;
    const int OB = thread_id / out_cols / out_rows / out_length / out_depth;
    // Compute the input depth and the index of depth multiplier.
    // Channelwise conv: one filter per channel, so input channel == output channel.
    const int in_d = OC;

    // Decide if all input is valid, if yes, we can skip the boundary checks
    // for each input.
    const int in_r_start = OH * stride - pad_rows;
    const int in_c_start = OW * stride - pad_cols;
    const int in_l_start = OL * temporal_stride - pad_length;
    const int in_r_end = in_r_start + filter_rows;
    const int in_c_end = in_c_start + filter_cols;
    const int in_l_end = in_l_start + filter_length;

    const int out_backprop_offset =
        (OB * out_depth * out_length * out_rows * out_cols) +
        (OC * out_length * out_rows * out_cols) + (OL * out_rows * out_cols) +
        (OH * out_cols) + (OW);

#if __CUDA_ARCH__ >= 350
    const T out_bp = __ldg(out_backprop + out_backprop_offset);
#else
    const T out_bp = out_backprop[out_backprop_offset];
#endif
    if (in_r_start >= 0 && in_c_start >= 0 && in_r_end < in_rows &&
        in_c_end < in_cols && in_l_start >= 0 && in_l_end < in_length) {
      // Fast path: the whole receptive field is in-bounds, no per-element checks.
      // NOTE(review): the guard uses `<` rather than `<=` on *_end, which looks
      // conservative by one element — confirm against the padding convention.
#pragma unroll
      for (int f_l = 0; f_l < filter_length; ++f_l) {
        const int in_l = in_l_start + f_l;
#pragma unroll
        for (int f_r = 0; f_r < filter_rows; ++f_r) {
          const int in_r = in_r_start + f_r;
          // Avoid repeated computation.
          const int input_offset_temp =
              (OB * in_depth * in_length * in_rows * in_cols) +
              (OC * in_length * in_rows * in_cols) +
              (in_l * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
          for (int f_c = 0; f_c < filter_cols; ++f_c) {
            const int in_c = in_c_start + f_c;
            const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
            T partial_sum = __ldg(input + input_offset) * out_bp;
#else
            T partial_sum = input[input_offset] * out_bp;
#endif
            // dL/dW[in_d][f_l][f_r][f_c]; many output positions hit the same
            // filter tap, hence the atomic accumulation.
            T* addr = filter_backprop +
                (in_d * filter_rows * filter_cols * filter_length) +
                (f_l * filter_rows * filter_cols) + (f_c + filter_cols * f_r);
            atomicAdd(addr, partial_sum);
          }
        }
      }
    } else {
      // Slow path: receptive field may cross the border; check every tap.
#pragma unroll
      for (int f_l = 0; f_l < filter_length; ++f_l) {
        const int in_l = in_l_start + f_l;
#pragma unroll
        for (int f_r = 0; f_r < filter_rows; ++f_r) {
          const int in_r = in_r_start + f_r;
          // Avoid repeated computation.
          const int input_offset_temp =
              (OB * in_depth * in_length * in_rows * in_cols) +
              (OC * in_length * in_rows * in_cols) +
              (in_l * in_rows * in_cols) + (in_r * in_cols);
#pragma unroll
          for (int f_c = 0; f_c < filter_cols; ++f_c) {
            const int in_c = in_c_start + f_c;
            if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols &&
                in_l >= 0 && in_l < in_length) {
              const int input_offset = input_offset_temp + in_c;
#if __CUDA_ARCH__ >= 350
              T partial_sum = __ldg(input + input_offset) * out_bp;
#else
              T partial_sum = input[input_offset] * out_bp;
#endif
              T* addr = filter_backprop +
                  (in_d * filter_rows * filter_cols * filter_length) +
                  (f_l * filter_rows * filter_cols) + (f_c + filter_cols * f_r);
              atomicAdd(addr, partial_sum);
            }
          }
        }
      }
    }
  }
}

// Depthwise 3D convolution backprop w.r.t. the input, NCHW layout.
// One thread per element of in_backprop; each thread gathers (no atomics) the
// contributions of every output position whose receptive field covers it.
template <typename T>
__global__ void DepthwiseConv3dBackpropInputGPUKernelNCHW(
    const DepthwiseArgs args, const T* out_backprop, const T* filter,
    T* in_backprop, int num_in_backprop) {
  const int in_rows = args.in_rows;
  const int in_cols = args.in_cols;
  const int in_length = args.in_length;
  const int in_depth = args.in_depth;
  const int filter_rows = args.filter_rows;
  const int filter_cols = args.filter_cols;
  const int filter_length = args.filter_length;
  const int stride = args.stride;
  const int temporal_stride = args.temporal_stride;
  const int pad_rows = args.pad_rows;
  const int pad_cols = args.pad_cols;
  const int pad_length = args.pad_length;
  const int out_rows = args.out_rows;
  const int out_cols = args.out_cols;
  const int out_length = args.out_length;
  const int out_depth = args.out_depth;

  CUDA_1D_KERNEL_LOOP(thread_id, num_in_backprop) {
    // Decompose flat index into (batch, channel, length, row, col).
    const int IW = thread_id % in_cols;
    const int IH = (thread_id / in_cols) % in_rows;
    const int IL = (thread_id / in_cols / in_rows) % in_length;
    const int IC = (thread_id / in_cols / in_rows / in_length) % in_depth;
    const int IB = thread_id / in_cols / in_rows / in_length / in_depth;
    T sum = 0;

    // Range of output coordinates whose window includes (IL, IH, IW);
    // the "+ stride" / "+ temporal_stride" terms implement a ceiling divide.
    const int out_r_start =
        max(0, (IH - filter_rows + pad_rows + stride) / stride);
    const int out_r_end = min(out_rows - 1, (IH + pad_rows) / stride);
    const int out_c_start =
        max(0, (IW - filter_cols + pad_cols + stride) / stride);
    const int out_c_end = min(out_cols - 1, (IW + pad_cols) / stride);
    const int out_l_start = max(
        0, (IL - filter_length + pad_length + temporal_stride) / temporal_stride);
    const int out_l_end =
        min(out_length - 1, (IL + pad_length) / temporal_stride);

#pragma unroll
    for (int out_l = out_l_start; out_l <= out_l_end; ++out_l) {
      const int f_l = IL + pad_length - out_l * temporal_stride;
      for (int out_r = out_r_start; out_r <= out_r_end; ++out_r) {
        const int f_r = IH + pad_rows - out_r * stride;
        for (int out_c = out_c_start; out_c <= out_c_end; ++out_c) {
          const int f_c = IW + pad_cols - out_c * stride;
          // Filter layout matches the forward kernel: [depth][length][row][col].
          const int filter_offset =
              IC * filter_rows * filter_cols * filter_length +
              f_l * filter_cols * filter_rows + f_r * filter_cols + f_c;
          const int out_backprop_offset =
              (IB * out_depth * out_length * out_rows * out_cols) +
              (IC * out_length * out_rows * out_cols) +
              (out_l * out_rows * out_cols) + (out_r * out_cols) + (out_c);
#if __CUDA_ARCH__ >= 350
          sum += __ldg(out_backprop + out_backprop_offset) *
                 __ldg(filter + filter_offset);
#else
          sum += out_backprop[out_backprop_offset] * filter[filter_offset];
#endif
        }
      }
    }
    const int in_backprop_offset =
        (IB * in_rows * in_cols * in_length * in_depth) +
        (IC * in_rows * in_cols * in_length) + (IL * in_rows * in_cols) +
        (IH * in_cols) + (IW);
    in_backprop[in_backprop_offset] = sum;
  }
}

// Caffe2 operator: channelwise (group == channels) 3D convolution, NCHW only.
// Forward pass launches DepthwiseConv3dGPUKernelNCHW; optional third input is
// a bias added via cuDNN.
class ChannelwiseConv3dOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);

  ChannelwiseConv3dOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws),
        cudnn_wrapper_(&context_) {
    OPERATOR_NEEDS_FEATURE(
        this->order_ == StorageOrder::NCHW,
        "ChannelwiseConv3dOp only supports NCHW order");
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
  }

  ~ChannelwiseConv3dOp() {
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
  }

  bool RunOnDeviceWithOrderNCHW() override {
    const Tensor& X = Input(0);
    auto& filter = Input(1);
    const int C = X.dim32(1);
    CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());
    const int M = filter.dim32(0); // number of output filters

    // enforce input/output filters are the same
    CAFFE_ENFORCE_EQ(M, X.dim32(1));
    CAFFE_ENFORCE_EQ(C, X.dim32(1));

    // check group parameters
    CAFFE_ENFORCE_EQ(C, this->group_);
    CAFFE_ENFORCE_GT(this->group_, 1);

    auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, filter.dim32(0));
    Tensor* Y = Output(0, sizes, at::dtype<float>());

    // Tensors are NCLHW: (batch, channel, length, row, col).
    DepthwiseArgs args;
    args.batch = X.dim32(0);
    args.in_length = X.dim32(2);
    args.in_rows = X.dim32(3);
    args.in_cols = X.dim32(4);
    args.in_depth = X.dim32(1);
    CAFFE_ENFORCE_EQ(kernel_.size(), 3);
    args.filter_cols = kernel_[2];
    args.filter_rows = kernel_[1];
    args.filter_length = kernel_[0];
    CAFFE_ENFORCE_EQ(stride_.size(), 3);
    // Spatial strides must match; temporal stride may differ.
    args.stride = stride_[1];
    CAFFE_ENFORCE_EQ(stride_[1], stride_[2]);
    args.temporal_stride = stride_[0];
    CAFFE_ENFORCE_EQ(pads_.size(), 6);
    args.pad_length = pads_[0];
    args.pad_rows = pads_[1];
    args.pad_cols = pads_[2];
    CAFFE_ENFORCE_EQ(Y->dim32(0), X.dim32(0));
    args.out_rows = Y->dim32(3);
    args.out_cols = Y->dim32(4);
    args.out_length = Y->dim32(2);
    args.out_depth = Y->dim32(1);

    // One thread per output element.
    DepthwiseConv3dGPUKernelNCHW<float>
        <<<CAFFE_GET_BLOCKS(Y->size()),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context_.cuda_stream()>>>(
            args,
            X.data<float>(),
            filter.data<float>(),
            Y->mutable_data<float>(),
            Y->size());

    if (InputSize() == 3) {
      // Optional bias: broadcast-add a (1, M, 1, 1, 1) tensor over Y via cuDNN.
      std::vector<int> bias_dims(X.ndim(), 1);
      bias_dims[1] = M;
      std::vector<int> strides = {M, 1, 1, 1, 1};
      CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
          bias_desc_,
          cudnnTypeWrapper<float>::type,
          X.ndim(),
          bias_dims.data(),
          strides.data()));
      vector<int> dims = {
          Y->dim32(0), M, Y->dim32(2), Y->dim32(3), Y->dim32(4)};
      strides = {M * Y->dim32(2) * Y->dim32(3) * Y->dim32(4),
                 Y->dim32(2) * Y->dim32(3) * Y->dim32(4),
                 Y->dim32(3) * Y->dim32(4),
                 Y->dim32(4),
                 1};
      CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
          top_desc_for_bias_,
          cudnnTypeWrapper<float>::type,
          X.ndim(),
          dims.data(),
          strides.data()));
      auto& bias = Input(2);
      CAFFE_ENFORCE_EQ(bias.ndim(), 1);
      CAFFE_ENFORCE_EQ(bias.dim32(0), M);
      CUDNN_ENFORCE(cudnnAddTensor(
          cudnn_wrapper_.inline_cudnn_handle(),
          cudnnTypeWrapper<float>::kOne(),
          bias_desc_,
          bias.data<float>(),
          cudnnTypeWrapper<float>::kOne(),
          top_desc_for_bias_,
          Y->mutable_data<float>()));
    }
    return true;
  }

 private:
  CuDNNWrapper cudnn_wrapper_;
  cudnnTensorDescriptor_t bias_desc_;          // descriptor for the bias vector
  cudnnTensorDescriptor_t top_desc_for_bias_;  // descriptor for Y during bias add
};

// Gradient operator for ChannelwiseConv3dOp. Computes dX and dFilter with the
// custom kernels above, and dBias (when present) with cuDNN.
class ChannelwiseConv3dGradientOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);

  ChannelwiseConv3dGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws),
        cudnn_wrapper_(&context_),
        no_bias_(OperatorBase::GetSingleArgument<int>("no_bias", 0)) {
    CAFFE_ENFORCE(
        !(no_bias_ && OutputSize() == 3),
        "If bias is not present, you should not have 3 grad output.");
    OPERATOR_NEEDS_FEATURE(
        this->order_ == StorageOrder::NCHW,
        "ChannelwiseConv3dGradientOp only supports NCHW order");
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_));
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_));
  }

  ~ChannelwiseConv3dGradientOp() {
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_));
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_));
  }

  bool RunOnDeviceWithOrderNCHW() override {
    auto& X = Input(INPUT);
    auto& filter = Input(FILTER);
    auto& dY = Input(OUTPUT_GRAD);
    auto* dfilter = Output(FILTER_GRAD);
    const int C = X.dim32(1);

    const vector<int> input_dims = this->GetDims(X);
    ConvPoolOpBase<CUDAContext>::ComputePads(input_dims);
    CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());
    const int M = filter.dim32(0);
    CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
    CAFFE_ENFORCE(M % group_ == 0);
    dfilter->ResizeLike(filter);

    DepthwiseArgs args;
    args.batch = X.dim32(0);
    args.in_rows = X.dim32(3);
    args.in_cols = X.dim32(4);
    args.in_length = X.dim32(2);
    args.in_depth = X.dim32(1);
    args.filter_cols = kernel_[2];
    args.filter_rows = kernel_[1];
    args.filter_length = kernel_[0];
    args.stride = stride_[1];
    CAFFE_ENFORCE_EQ(stride_[1], stride_[2]);
    args.temporal_stride = stride_[0];
    args.pad_length = pads_[0];
    args.pad_rows = pads_[1];
    args.pad_cols = pads_[2];
    args.out_rows = dY.dim32(3);
    args.out_cols = dY.dim32(4);
    args.out_length = dY.dim32(2);
    args.out_depth = dY.dim32(1);

    CAFFE_ENFORCE(OutputSize() == 3 || (no_bias_ && (OutputSize() == 2)));
    // With no_bias_, the second output slot holds dX instead of dBias.
    auto* dX = Output(no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD);
    dX->ResizeLike(X);

    // The filter-backprop kernel accumulates with atomicAdd, so dfilter must
    // be zeroed first.
    math::Set<float, CUDAContext>(
        dfilter->size(), 0, dfilter->mutable_data<float>(), &context_);

    DepthwiseConv3dBackpropFilterGPUKernelNCHW<float>
        <<<CAFFE_GET_BLOCKS(dY.size()),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context_.cuda_stream()>>>(
            args,
            dY.data<float>(),
            X.data<float>(),
            dfilter->mutable_data<float>(),
            dY.size());
    DepthwiseConv3dBackpropInputGPUKernelNCHW<float>
        <<<CAFFE_GET_BLOCKS(dX->size()),
           CAFFE_CUDA_NUM_THREADS,
           0,
           context_.cuda_stream()>>>(
            args,
            dY.data<float>(),
            filter.data<float>(),
            dX->mutable_data<float>(),
            dX->size());

    if (!no_bias_) {
      // dBias = reduce dY over batch and spatial dims, via cuDNN.
      std::vector<int> bias_dims(X.ndim(), 1);
      bias_dims[1] = M;
      std::vector<int> strides = {M, 1, 1, 1, 1};
      CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
          bias_desc_,
          cudnnTypeWrapper<float>::type,
          X.ndim(),
          bias_dims.data(),
          strides.data()));
      std::vector<int> dims = {
          dY.dim32(0), M, dY.dim32(2), dY.dim32(3), dY.dim32(4)};
      strides = {M * dY.dim32(2) * dY.dim32(3) * dY.dim32(4),
                 dY.dim32(2) * dY.dim32(3) * dY.dim32(4),
                 dY.dim32(3) * dY.dim32(4),
                 dY.dim32(4),
                 1};
      CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
          top_desc_for_bias_,
          cudnnTypeWrapper<float>::type,
          X.ndim(),
          dims.data(),
          strides.data()));
      auto* dbias = Output(BIAS_OR_INPUT_GRAD);
      dbias->Resize(M);
      CUDNN_ENFORCE(cudnnConvolutionBackwardBias(
          cudnn_wrapper_.inline_cudnn_handle(),
          cudnnTypeWrapper<float>::kOne(),
          top_desc_for_bias_,
          dY.data<float>(),
          cudnnTypeWrapper<float>::kZero(),
          bias_desc_,
          dbias->mutable_data<float>()));
    }
    return true;
  }

 private:
  CuDNNWrapper cudnn_wrapper_;
  cudnnTensorDescriptor_t bias_desc_;
  cudnnTensorDescriptor_t top_desc_for_bias_;
  bool no_bias_;  // when set, no dBias output; slot 1 becomes dX

  INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
  OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};

REGISTER_CUDA_OPERATOR_WITH_ENGINE(Conv, CHANNELWISE_3D, ChannelwiseConv3dOp);
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
    ConvGradient, CHANNELWISE_3D, ChannelwiseConv3dGradientOp);
} // namespace caffe2
the_stack
// #include "lrbn.cuh" using namespace at; using namespace at::native; // The maximum number of threads in a block #if defined(__HIP_PLATFORM_HCC__) constexpr int MAX_BLOCK_SIZE = 256; #else constexpr int MAX_BLOCK_SIZE = 512; #endif // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { #if defined(__HIP_PLATFORM_HCC__) int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE }; #else int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; #endif for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } template <typename scalar_t, typename accscalar_t> struct Float2 { accscalar_t v1, v2; __device__ Float2() {} __device__ Float2(scalar_t v1, scalar_t v2) : v1(static_cast<accscalar_t>(v1)), v2(static_cast<accscalar_t>(v2)) {} __device__ Float2(int v) : v1(static_cast<accscalar_t>(v)), v2(static_cast<accscalar_t>(v)) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; // Sum across all threads within a warp template <typename T> static __device__ __forceinline__ T warpSum(T val) { for (int i = 0; i < getMSB(C10_WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, C10_WARP_SIZE); } return val; } template <typename scalar_t, typename accscalar_t> static __device__ __forceinline__ Float2<scalar_t, accscalar_t> warpSum(Float2<scalar_t, accscalar_t> value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } template <typename scalar_t, typename accscalar_t, typename PTA> struct GradOp { __device__ GradOp(accscalar_t m, const PTA& i, const PTA& g) : mean(m), input(i), grad_output(g) {} __device__ __forceinline__ Float2<scalar_t, accscalar_t> operator()(int batch, int plane, int n) { accscalar_t g = grad_output[batch][plane][n]; accscalar_t c = 
static_cast<accscalar_t>(input[batch][plane][n]) - mean; return Float2<scalar_t, accscalar_t>(g, g * c); } const accscalar_t mean; const PTA& input; const PTA& grad_output; }; // Sum across (batch, x/y/z) applying Op() pointwise // this works by first having each thread sum it's part // of the data. Then there is a double-shuffeling reduction. // First each warp (of C10_WARP_SIZE threads) uses warpSum to reduce its // data to the "warp leader", who writes its value into shared memory. // Then a single warp reads the remaining (at most C10_WARP_SIZE) items // and reduces them using another warpSum. // The implicit assumption is that there are no more // than C10_WARP_SIZE**2 threads. template<typename scalar_t, typename Op, typename PTA> __device__ scalar_t reduce(Op op, PTA tensor, int plane) { // first the reductions each thread does separately scalar_t sum = static_cast<scalar_t>(0); for (int batch = threadIdx.y; batch < tensor.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x) { sum += op(batch, plane, x); } } // first warpSum to get one value per thread to // one value per warp sum = warpSum(sum); // this writes each warps item into shared memory // there are at most C10_WARP_SIZE items left because // there are at most C10_WARP_SIZE**2 threads at the beginning __shared__ scalar_t shared[C10_WARP_SIZE]; __syncthreads(); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (tid % C10_WARP_SIZE == 0) { shared[tid / C10_WARP_SIZE] = sum; } if (tid >= blockDim.x * blockDim.y / C10_WARP_SIZE && tid < C10_WARP_SIZE) { // zero out the other entries in shared shared[tid] = (scalar_t)0; } __syncthreads(); // now have a second warpSum to reduce the intermediate values // from shared memory to a single number. The very first // thread writes it to shared memory. 
if (tid / C10_WARP_SIZE == 0) { sum = warpSum(shared[tid]); if (tid == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole grad_input return shared[0]; } template <typename input_scalar_t, typename stat_scalar_t, typename stat_accscalar_t, typename index_t> __global__ void batch_norm_backward_kernel1( const torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> input, const torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> grad_output, torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> grad_input, torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> grad_weight, torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> grad_bias, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> weight, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> bias, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> running_mean, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> running_var, const torch::GenericPackedTensorAccessor<stat_accscalar_t, 1, torch::DefaultPtrTraits, index_t> save_mean, const torch::GenericPackedTensorAccessor<stat_accscalar_t, 1, torch::DefaultPtrTraits, index_t> save_invstd, bool train) { index_t plane = blockIdx.x; index_t N = grad_output.size(0) * grad_output.size(2); stat_accscalar_t mean, invstd; if (train) { mean = save_mean[plane]; invstd = save_invstd[plane]; } else { mean = stat_accscalar_t(0); invstd = stat_accscalar_t(1); } stat_accscalar_t weight_val = weight.size(0) > 0 ? static_cast<stat_accscalar_t>(weight[plane]) : stat_accscalar_t(1); stat_accscalar_t bias_val = bias.size(0) > 0 ? 
static_cast<stat_accscalar_t>(bias[plane]) : stat_accscalar_t(0); stat_accscalar_t norm = stat_accscalar_t(1) / N; stat_accscalar_t inv_weight = stat_accscalar_t(1) / weight_val; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(grad_output) // 2. DotProduct(input - mean, grad_output) GradOp<input_scalar_t, stat_accscalar_t, torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t>> g(bias_val, input, grad_output); Float2<input_scalar_t, stat_accscalar_t> res = reduce<Float2<input_scalar_t, stat_accscalar_t>, GradOp<input_scalar_t, stat_accscalar_t, torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t>>>(g, grad_output, plane); stat_accscalar_t grad_output_sum = res.v1; stat_accscalar_t dot_p = res.v2; stat_accscalar_t grad_mean = grad_output_sum * norm; stat_accscalar_t proj_scale = dot_p * norm * inv_weight * inv_weight; stat_accscalar_t grad_scale = invstd * weight_val; if (grad_input.data() != NULL) { for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) { input_scalar_t go = grad_output[batch][plane][x]; if (train) { stat_accscalar_t inp = input[batch][plane][x]; stat_accscalar_t proj = (inp - bias_val) * proj_scale; grad_input[batch][plane][x] = static_cast<input_scalar_t>((go - proj - grad_mean) * grad_scale); } else { grad_input[batch][plane][x] = static_cast<input_scalar_t>(go * grad_scale); } } } } if (grad_weight.size(0) > 0) { if (threadIdx.x == 0) { grad_weight[plane] = static_cast<stat_scalar_t>(dot_p * inv_weight); } } if (grad_bias.size(0) > 0) { if (threadIdx.x == 0) { grad_bias[plane] = static_cast<stat_scalar_t>(grad_output_sum); } } } template <typename input_scalar_t, typename stat_scalar_t, typename stat_accscalar_t, typename index_t> void batch_norm_bwd1( const torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> input, const 
torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> grad_output, torch::GenericPackedTensorAccessor<input_scalar_t, 3, torch::DefaultPtrTraits, index_t> grad_input, torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> grad_weight, torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> grad_bias, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> weight, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> bias, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> running_mean, const torch::GenericPackedTensorAccessor<stat_scalar_t, 1, torch::DefaultPtrTraits, index_t> running_var, const torch::GenericPackedTensorAccessor<stat_accscalar_t, 1, torch::DefaultPtrTraits, index_t> save_mean, const torch::GenericPackedTensorAccessor<stat_accscalar_t, 1, torch::DefaultPtrTraits, index_t> save_invstd, bool train) { using accscalar_t = at::acc_type<stat_scalar_t, true>; auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input.size(1)); int tf = getNumThreads(input.size(2)); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf)); batch_norm_backward_kernel1<input_scalar_t, stat_scalar_t, accscalar_t, index_t> <<<blocks, threads, 0, stream>>> (input, grad_output, grad_input, grad_weight, grad_bias, weight, bias, running_mean, running_var, save_mean, save_invstd, train); } template void batch_norm_bwd1<double, double, double, int>( const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, int> input, const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, int> grad_output, torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, int> grad_input, torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> grad_weight, torch::GenericPackedTensorAccessor<double, 1, 
torch::DefaultPtrTraits, int> grad_bias, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> weight, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> bias, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> running_mean, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> running_var, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> save_mean, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, int> save_invstd, bool train); template void batch_norm_bwd1<double, double, double, long>( const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> input, const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> grad_output, torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> grad_input, torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> grad_weight, torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> grad_bias, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> weight, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> bias, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> running_mean, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> running_var, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> save_mean, const torch::GenericPackedTensorAccessor<double, 1, torch::DefaultPtrTraits, long> save_invstd, bool train); template void batch_norm_bwd1<double, float, float, int>( const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, int> input, const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, int> grad_output, torch::GenericPackedTensorAccessor<double, 3, 
torch::DefaultPtrTraits, int> grad_input, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> grad_weight, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> grad_bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> weight, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> running_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> running_var, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> save_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> save_invstd, bool train); template void batch_norm_bwd1<double, float, float, long>( const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> input, const torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> grad_output, torch::GenericPackedTensorAccessor<double, 3, torch::DefaultPtrTraits, long> grad_input, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> grad_weight, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> grad_bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> weight, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> running_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> running_var, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> save_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> save_invstd, bool train); template void batch_norm_bwd1<float, float, float, int>( const torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, int> 
input, const torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, int> grad_output, torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, int> grad_input, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> grad_weight, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> grad_bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> weight, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> running_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> running_var, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> save_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, int> save_invstd, bool train); template void batch_norm_bwd1<float, float, float, long>( const torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, long> input, const torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, long> grad_output, torch::GenericPackedTensorAccessor<float, 3, torch::DefaultPtrTraits, long> grad_input, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> grad_weight, torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> grad_bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> weight, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> bias, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> running_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> running_var, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> save_mean, const torch::GenericPackedTensorAccessor<float, 1, torch::DefaultPtrTraits, long> save_invstd, bool 
train);
the_stack
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>

#include "ssids/gpu/kernels/datatypes.h"
#include "cuda/cuda_check.h"

#define min(x,y) ((x) < (y) ? (x) : (y))

/* bias factor applied when choosing between 1x1 and 2x2 pivots in
   dev_select_pivots: a 2x2 pivot is preferred unless a 1x1 candidate is
   FAVOUR2x2 times "better" */
#define FAVOUR2x2 100

#define CBLOCKS 3
#define MCBLOCKS 8
#define BLOCKS 7
#define MBLOCKS 11
#define BLOCK_SIZE 8
#define MAX_CUDA_BLOCKS 65535

using namespace spral::ssids::gpu;

namespace /* anon */ {

/* Dynamically sized shared-memory scratch area; its byte size is supplied as
   the third kernel-launch argument by the host-side launchers (not visible in
   this fragment). */
extern __shared__ volatile double SharedMemory[];

/* Initializes the pivot statistics and the pivot-order index before an LDLT
   factorization: stat[0] = ncols (all pivots optimistically assumed
   successful), stat[1] = 0, and every entry of ind set to the out-of-range
   sentinel ncols + 1. */
__global__ void cu_block_ldlt_init(
  const int ncols,
  int *const stat,
  int *const ind
) {
  if (threadIdx.x == 0) {
    stat[0] = ncols; // successful pivots
    stat[1] = 0;
  }
  if (threadIdx.x < ncols)
    ind[threadIdx.x] = ncols + 1;
}

/* Loads one CUDA block's portion of A into the shared-memory tile array fs
   (TILES square tiles of side TILE_SIZE): tile 0 receives the diagonal
   (pivot) block, tiles 1..TILES-1 receive this block's slice of the
   off-diagonal rows; out-of-range positions are zero-filled. */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_init_chol_fact(
  const unsigned int block,
  const int nrows, // number of rows of the factorized matrix
  const int ncols, // number of columns thereof
  const ELEMENT_TYPE *const a, // array of elements of A
  const int lda, // leading dimension of a
  volatile ELEMENT_TYPE *const fs // initial L factor (shared mem)
) {
  const int SIZE_X = TILES*TILE_SIZE;

  int x; // row index
  for ( int tile = 0; tile < TILES; tile++ ) {
    if ( tile ) { // load A's offdiagonal tiles into shared memory
      x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE +
        (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A
      fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] =
        ( x < nrows && threadIdx.y < ncols ) ?
          a[x + lda*threadIdx.y] : 0.0;
    }
    else { // load the diagonal (pivot) tile
      fs[threadIdx.x + SIZE_X*threadIdx.y] =
        ( threadIdx.x < ncols && threadIdx.y < ncols ) ?
          a[threadIdx.x + lda*threadIdx.y] : 0.0;
    }
  }
}

/* Writes the factor tiles computed in shared memory back to global memory;
   the diagonal (pivot) tile is written by CUDA block 0 only, so that it is
   stored exactly once. */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_save_chol_fact(
  const unsigned int block,
  const int nrows, // number of rows of the factorized matrix
  const int ncols, // number of columns thereof
  const volatile ELEMENT_TYPE *const fs, // initial L factor (shared mem)
  ELEMENT_TYPE *const f, // array of elements of L
  const int ldf // leading dimension of f
) {
  const int SIZE_X = TILES*TILE_SIZE;

  int x; // row index
  for ( int tile = 0; tile < TILES; tile++ ) {
    if ( tile ) { // upload the relevant elements of fs to f
      x = ncols + threadIdx.x + (tile - 1)*TILE_SIZE +
        (TILES - 1)*TILE_SIZE*block;
      if ((x < nrows) && (threadIdx.y < ncols))
        f[x + ldf*threadIdx.y] =
          fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y];
    }
    else if ( block == 0 ) { // upload to f and fd
      if ( threadIdx.x < ncols && threadIdx.y < ncols )
        f[threadIdx.x + ldf*threadIdx.y] =
          fs[threadIdx.x + SIZE_X*threadIdx.y];
    }
  } // loop through tiles ends here
}

/* Dense Cholesky factorization of one ncols-wide panel held in shared
   memory: each iteration scales the pivot column by sqrt of the pivot and
   updates the trailing columns. On a non-positive pivot the count of
   successful columns is recorded in stat[0] and the kernel bails out
   (the early return is uniform across the block: v is read from shared
   memory common to all threads). */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_block_chol(
  const int block,
  const int nrows,
  const int ncols,
  const ELEMENT_TYPE *const a,
  const int lda,
  ELEMENT_TYPE *const f,
  const int ldf,
  int *const stat
) {
  const int SIZE_X = TILES * TILE_SIZE;

  int ip;
  ELEMENT_TYPE v;

  volatile ELEMENT_TYPE *const work = (volatile ELEMENT_TYPE*)SharedMemory;

  // load A into shared memory
  dev_init_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( block, nrows, ncols, a, lda, work );
  __syncthreads();

  for (ip = 0; ip < ncols; ip++) {
    v = work[ip + SIZE_X*ip];
    if ( v <= 0.0 ) { // matrix not positive definite: report and stop
      if ((block == 0) && (threadIdx.x == 0) && (threadIdx.y == 0))
        stat[0] = ip;
      return;
    }
    v = sqrt(v);
    __syncthreads();
    if (threadIdx.y < TILES)
      work[threadIdx.x + TILE_SIZE*threadIdx.y + SIZE_X*ip] /= v;
    __syncthreads();
    if ((threadIdx.y > ip) && (threadIdx.y < ncols)) {
      for (int x = threadIdx.x + TILE_SIZE; x < SIZE_X; x += TILE_SIZE)
        work[x + SIZE_X*threadIdx.y] -=
          work[threadIdx.y + SIZE_X*ip] * work[x + SIZE_X*ip];
      if (threadIdx.x > ip)
        work[threadIdx.x + SIZE_X*threadIdx.y] -=
          work[threadIdx.y + SIZE_X*ip] * work[threadIdx.x + SIZE_X*ip];
    }
    __syncthreads();
  }
  if ((block == 0) && (threadIdx.x == 0) && (threadIdx.y == 0))
    stat[0] = ncols;

  // save the L factor
  dev_save_chol_fact< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( block, nrows, ncols, work, f, ldf );
}

/* Thin kernel wrapper: one CUDA block per (TILES - 1)*TILE_SIZE-row slice. */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__global__ void cu_block_chol(
  const int nrows,
  const int ncols,
  const ELEMENT_TYPE *const a,
  const int lda,
  ELEMENT_TYPE *const f,
  const int ldf,
  int *const stat
) {
  dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( blockIdx.x, nrows, ncols, a, lda, f, ldf, stat );
}

struct multinode_chol_type {
  int nrows;
  int ncols;
  double *lcol;
};

// input data type for multiblock_fact and multiblock_chol
// each CUDA block gets a copy
struct multiblock_fact_type {
  int nrows; // no node's rows
  int ncols; // no node's cols
  int ld; // node's leading dimension
  int p; // no rows above the pivot block
  double *aptr; // pointer to this node's A matrix
  double *ldptr; // pointer to this node's LD matrix
  int offf; // this node's L offset in the array of all Ls
  double *dptr; // pointer to this node's D in array of all Ds
  int node; // node index
  int offb; // the idx of the first CUDA block processing this node
};

/* Prepares per-CUDA-block work descriptors (mbfdata) for the next round of
   multiblock factorization: advances each node's pivot-block window
   (ib, jb, done, rght), records the number of columns to try (cb) in
   stat[blockIdx.x], reserves k descriptor slots via atomicAdd on nl[0] and
   fills them cooperatively.
   NOTE(review): multinode_fact_type is declared elsewhere (datatypes.h?);
   field semantics below are inferred from use — confirm against that header.
   NOTE(review): iwork[4], iwork[6] and iwork[8] are written but never read;
   the pointers are passed through lptr/ldptr/dptr instead. */
__global__ void cu_multiblock_fact_setup(
  struct multinode_fact_type *ndata,
  struct multiblock_fact_type *const mbfdata,
  const int step,
  const int block_size,
  const int blocks,
  const int offb,
  int *const stat,
  int *const ind,
  int *const nl
) {
  ndata += blockIdx.x;
  const int ncols = ndata->ncols;
  const int nrows = ndata->nrows;
  double *const lval = ndata->lval;
  double *const ldval = ndata->ldval;
  double *const dval = ndata->dval;
  int ib = ndata->ib;
  int jb = ndata->jb;
  int done = ndata->done;
  int rght = ndata->rght;
  const int lbuf = ndata->lbuf;

  if (jb < ib)
    return;

  const int pivoted = stat[blockIdx.x];

  if (pivoted > 0) {
    done += pivoted;
    if (jb == rght)
      jb = done;
  }
  if (jb <= ncols)
    ib = jb + 1;
  __syncthreads();

  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    ndata->ib = ib;
    ndata->jb = jb;
    ndata->done = done;
  }

  if (ib > ncols)
    return;

  if (ib > rght) { // advance the right boundary of the window
    rght += step;
    if (rght > ncols)
      rght = ncols;
    if ((threadIdx.x == 0) && (threadIdx.y == 0))
      ndata->rght = rght;
  }

  const int rb = nrows - done;
  int cb = rght - ib + 1;
  if (cb > block_size)
    cb = block_size;

  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    ndata->jb = jb + cb;
    stat[blockIdx.x] = cb; // successful pivots
  }

  if (ind && (threadIdx.x < cb) && (threadIdx.y == 0))
    ind[blockIdx.x*block_size + threadIdx.x] = cb + 1;

  // number of CUDA blocks needed for this node's remaining rows
  int k = (rb - cb - 1)/(block_size*(blocks - 1)) + 1;
  __shared__ volatile int ncb;
  if ((threadIdx.x == 0) && (threadIdx.y == 0))
    ncb = atomicAdd(&nl[0], k); // reserve k slots in mbfdata

  __shared__ volatile int iwork[9];
  __shared__ double *volatile lptr, *volatile ldptr, *volatile dptr;
  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    iwork[0] = cb;
    iwork[1] = rb;
    iwork[2] = nrows;
    iwork[3] = ib - done - 1;
    lptr = lval + done + (ib - 1)*nrows;
    ldptr = ldval + done + (ib - 1)*nrows;
    iwork[5] = lbuf + done;
    dptr = dval + 2*done;
    iwork[7] = offb + blockIdx.x;
    iwork[8] = ncb;
  }
  __syncthreads();

  // one field per threadIdx.x lane, one descriptor per i
  for (int i = threadIdx.y; i < k; i += blockDim.y) {
    switch(threadIdx.x) {
    case 0: mbfdata[ncb+i].ncols = iwork[0]; break;
    case 1: mbfdata[ncb+i].nrows = iwork[1]; break;
    case 2: mbfdata[ncb+i].ld = iwork[2]; break;
    case 3: mbfdata[ncb+i].p = iwork[3]; break;
    case 4: mbfdata[ncb+i].aptr = lptr;
            mbfdata[ncb+i].ldptr = ldptr; break;
    case 5: mbfdata[ncb+i].offf = iwork[5]; break;
    case 6: mbfdata[ncb+i].dptr = dptr; break;
    case 7: mbfdata[ncb+i].node = iwork[7]; break;
    case 8: mbfdata[ncb+i].offb = i; break;
    }
  }
}

////////////////////////////////////////////////////////////////////////////
/* Functions below participate in the LDLT factorization

           | A_u P|   |L_u|
   Q A P = |P^T A_d P| = |L_d| * D * (L_d)^T = L * D * (L_d)^T   (LDLT)
           | A_l
P| |L_l| where A is nrows x ncols, P is a ncols x ncols permutation matrix, |I_u | Q = | P^T |, where I_u and I_l are identities, | I_l| L_d is a ncols x ncols lower triangular matrix with unit main diagonal and D is a ncols x ncols block diagonal matrix with 1x1 and 2x2 blocks on the main diagonal. Common variable names: nrows number of rows in A/L ncols number of columns in A/L offp number of rows in A_u */ //////////////////////////////////////////////////////////////////////////// /* The next function initializes L and the main diagonal and subdiagonal of D**(-1). L and L*D are stored in two shared memory arrays fs and fds, each arranged into TILES square tiles of size TILE_SIZE. The kernel for factorizing just one node uses TILES = 7, and the one for simultaneous factorization of several nodes uses TILES = 11. Each CUDA block uses dev_init_fact to load A_d into the first tile of fs and up to (TILES - 1)*TILE_SIZE rows of A_u and A_l into the remaining TILES - 1 tiles. The two diagonals of D**(-1) are stored in a shared memory array of size 2*TILE_SIZE, initialized to 0 by this kernel.
*/
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_init_fact(
  const unsigned int block, // relative CUDA block number
  const int nrows,
  const int ncols,
  const int offp,
  const ELEMENT_TYPE *const a, // array of elements of A
  const int lda, // leading dimension of a
  volatile ELEMENT_TYPE *const fs, // initial L factor (shared mem)
  volatile ELEMENT_TYPE *const ds // initial D**(-1) (shared mem)
) {
  const int SIZE_X = TILES * TILE_SIZE;

  int x, y; // position indices

  y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread
  // NOTE(review): the even/odd tile split below implies a thread block of
  // TILE_SIZE x 2*TILE_SIZE threads — confirm against the host launcher.
  if ( threadIdx.y < TILE_SIZE ) {
    for ( int tile = 0; tile < TILES; tile += 2 ) {
      if ( tile ) { // load A_u and A_l's even tiles into shared memory
        x = threadIdx.x + (tile - 1)*TILE_SIZE +
          (TILES - 1)*TILE_SIZE*block; // offdiagonal row index in A
        if ( x >= offp )
          x += ncols; // skip A_d
        fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y] =
          ( x < nrows && threadIdx.y < ncols ) ?
            a[x + lda*threadIdx.y] : 0.0;
      }
      else { // load A_d
        fs[threadIdx.x + SIZE_X*threadIdx.y] =
          ( threadIdx.x < ncols && threadIdx.y < ncols ) ?
            a[offp + threadIdx.x + lda*threadIdx.y] : 0.0;
      }
    }
  }
  else { // load A_u and A_l's odd tiles into shared memory
    for (int tile = 1; tile < TILES; tile += 2) {
      x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block;
      if (x >= offp)
        x += ncols;
      fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y] =
        ((x < nrows) && (y < ncols)) ? a[x + lda*y] : 0.0;
    }
  }

  // main diagonal and subdiagonal of D**(-1) set to 0
  if (threadIdx.y < 2)
    ds[2*threadIdx.x + threadIdx.y] = 0.0;
}

/* The next function uploads L, L*D and D to global memory */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_save_fact(
  const unsigned int block,
  const int nrows,
  const int ncols,
  const int offp,
  const int my, // save only if my is non-zero
  const volatile ELEMENT_TYPE *const fs, // L (shared mem)
  const volatile ELEMENT_TYPE *const fds, // L*D (shared mem)
  const volatile ELEMENT_TYPE *const ds, // 2 diags of D**(-1) (shared mem)
  ELEMENT_TYPE *const f, // L (global mem)
  const int ldf, // leading dimension of f
  ELEMENT_TYPE *const fd, // L*D (global mem)
  const int ldfd, // leading dimension of fd
  ELEMENT_TYPE *const d // 2 diags of D**(-1) (global mem)
) {
  const int SIZE_X = TILES * TILE_SIZE;

  int x, y; // position indices

  y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread
  if ( threadIdx.y < TILE_SIZE ) { // warps 0, 1
    for ( int tile = 0; tile < TILES; tile += 2 ) {
      if ( tile ) { // upload L_u, L_l, L_u*D and L_l*D's even tiles
        x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block;
        if ( x >= offp ) // skip L_d
          x += ncols;
        if ( x < nrows && threadIdx.y < ncols && my ) {
          f[x + ldf*threadIdx.y] =
            fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y];
          fd[x + ldfd*threadIdx.y] =
            fds[threadIdx.x + tile*TILE_SIZE + SIZE_X*threadIdx.y];
        }
      }
      else if ( block == 0 ) { // upload L_d and L_d*D
        if ( threadIdx.x < ncols && threadIdx.y < ncols && my ) {
          f[offp + threadIdx.x + ldf*threadIdx.y] =
            fs[threadIdx.x + SIZE_X*threadIdx.y];
          fd[offp + threadIdx.x + ldfd*threadIdx.y] =
            fds[threadIdx.x + SIZE_X*threadIdx.y];
        }
        // upload D**(-1)
        if ( threadIdx.x < 2 && threadIdx.y < ncols )
          d[threadIdx.x + 2*threadIdx.y] = ds[threadIdx.x + 2*threadIdx.y];
      }
    } // loop through even tiles ends here
  }
  else { // upload L_u, L_l, L_u*D and L_l*D's odd tiles (warps 2, 3)
    for (int tile = 1; tile < TILES; tile += 2) {
      x = threadIdx.x + (tile - 1)*TILE_SIZE + (TILES - 1)*TILE_SIZE*block;
      if (x >= offp) // skip L_d
        x += ncols;
      if ((x < nrows) && (y < ncols) && my) {
        f[x + ldf*y] = fs[threadIdx.x + tile*TILE_SIZE + SIZE_X*y];
        fd[x + ldfd*y] = fds[threadIdx.x + tile*TILE_SIZE + SIZE_X*y];
      }
    }
  }
}

/* The next function finds the largest element of the first row of A_d */
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_init_max(
  const int ncols,
  const volatile ELEMENT_TYPE *const fs,
  const int mx, // this thread mask
  volatile int *const mask, // pivot index/mask
  volatile bool *const not_max, // "not largest" flag
  volatile int &jps, // the index of the largest element
  volatile int &quit // pivoting failure flag
) {
  const int SIZE_X = TILES*TILE_SIZE;

  if (threadIdx.y == 0) {
    mask[threadIdx.x] = mx; // initialize the pivot index
    not_max[threadIdx.x] = mx; // initialize the "not largest" flag
  }
  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    jps = TILE_SIZE; // initialize pivot col jp: cf the case of a tie below
    quit = 0; // initialize failure flag
  }
  __syncthreads();
  // check if the element in the column threadIdx.x
  // of the first row is (one of) the largest one(s)
  if ((threadIdx.x < ncols) && (threadIdx.y < ncols) &&
      (threadIdx.x != threadIdx.y) &&
      (fabs(fs[SIZE_X*threadIdx.x]) < fabs(fs[SIZE_X*threadIdx.y])))
    not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere
  __syncthreads();
  // select the leftmost among the largest elements of the row
  if ((threadIdx.y == 0) && (not_max[threadIdx.x] == 0))
    atomicMin((int*)&jps, threadIdx.x); // in case of a tie, choose the leftmost
  __syncthreads();
}

/* The next function selects pivot based on the pending row number ip
   and the column number for the largest element in this row.
   Three options are considered:
   (1) use 1x1 pivot a11 = fs[ip + ld*ip],
   (2) use 1x1 pivot a22 = fs[jp + ld*jp],
   (3) use 2x2 pivot

       | a_11 a_12 |
       | a_12 a_22 |,

   where a12 = fs[ip + ld*jp].

   The pivot that has the smallest inverse is selected. */
template< typename ELEMENT_TYPE >
__device__ void dev_select_pivots_at_root(
  const ELEMENT_TYPE *const fs,
  const int ld, // leading dimension of fs
  int &ip, int &jp,
  ELEMENT_TYPE &a11, ELEMENT_TYPE &a12, ELEMENT_TYPE &a22,
  ELEMENT_TYPE &det
) {
  // select the pivot based on the row's largest element index
  if (ip != jp) { // choose between 1x1 and 2x2 pivots
    a11 = fs[ip + ld*ip];
    a12 = fs[ip + ld*jp];
    a22 = fs[jp + ld*jp];
    det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det
    if ( (fabs(a12) + fabs(a11) + fabs(a22))*fabs(a11) > fabs(det) ) {
      if (fabs(a11) > fabs(a22) ) { // choose the best 1x1 alternative
        jp = ip; // select a11
        det = a11; // pivot value stored in det
      }
      else {
        ip = jp; // select a22
        det = a22; // pivot value stored in det
      }
    }
    else if ( (fabs(a12) + fabs(a11) + fabs(a22))*fabs(a22) > fabs(det) ) {
      ip = jp; // select a22
      det = a22; // pivot value stored in det
    }
  }
  else
    det = fs[ip + ld*ip]; // pivot value stored in det
}

/* Same selection as dev_select_pivots_at_root, but reads from volatile
   shared memory and biases the choice towards 2x2 pivots by the factor
   FAVOUR2x2. */
template< typename ELEMENT_TYPE >
__device__ void dev_select_pivots(
  const volatile ELEMENT_TYPE *const fs,
  const int ld, // leading dimension of fs
  int &ip, int &jp,
  ELEMENT_TYPE &a11, ELEMENT_TYPE &a12, ELEMENT_TYPE &a22,
  ELEMENT_TYPE &det
) {
  // select the pivot based on the row's largest element index
  if (ip != jp) { // choose between 1x1 and 2x2 pivots
    a11 = fs[ip + ld*ip];
    a12 = fs[ip + ld*jp];
    a22 = fs[jp + ld*jp];
    det = a11*a22 - a12*a12; // determinant of 2x2 pivot stored in det
    if ( (fabs(a12) + fabs(a11) + fabs(a22))*fabs(a11) >
         FAVOUR2x2*fabs(det) ) {
      if ( fabs(a11) > fabs(a22) ) { // choose the best 1x1 alternative
        jp = ip; // select a11
        det = a11; // pivot value stored in det
      }
      else {
        ip = jp; // select a22
        det = a22; // pivot value stored in det
      }
    }
    else if ( (fabs(a12) + fabs(a11) + fabs(a22))*fabs(a22) >
              FAVOUR2x2*fabs(det) ) {
      ip = jp; // select a22
      det = a22; // pivot value stored in det
    }
  }
  else
    det = fs[ip + ld*ip]; // pivot value stored in det
}

/* The next function tries to apply 1x1 pivot. */
template< typename ELEMENT_TYPE >
__device__ bool dev_1x1_pivot_fails(
  const int x,
  const int ip,
  volatile ELEMENT_TYPE *const fs,
  volatile ELEMENT_TYPE *const fds,
  const int ld,
  const ELEMENT_TYPE det,
  const ELEMENT_TYPE delta,
  const ELEMENT_TYPE eps
) {
  // the column of fds is that of fs before the division by pivot
  const ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip];
  if ( fabs(det) <= eps ) { // the pivot is considered to be zero
    if ( fabs(u) <= eps ) { // the off-diagonal is considered to be zero
      if ( x == ip )
        fs[x + ld*ip] = 1.0;
      else
        fs[x + ld*ip] = 0.0;
    }
    else { // non-zero off-diagonal element found ->
      return 1; // this column to be delayed
    }
  }
  else if ( fabs(det) <= delta*fabs(u) ) // pivot too small ->
    return 1; // this column to be delayed
  else
    fs[x + ld*ip] = u/det; // ok to divide
  return 0;
}

/* The next function tries to apply 2x2 pivot.
*/
template< typename ELEMENT_TYPE >
__device__ bool dev_2x2_pivot_fails(
  const int x,
  const int ip,
  const int jp,
  volatile ELEMENT_TYPE *const fs,
  volatile ELEMENT_TYPE *const fds,
  const int ld,
  const ELEMENT_TYPE a11,
  const ELEMENT_TYPE a12,
  const ELEMENT_TYPE a22,
  const ELEMENT_TYPE det,
  const ELEMENT_TYPE delta,
  const ELEMENT_TYPE eps
) {
  // the columns of fds is those of fd before division by pivot
  const ELEMENT_TYPE u = fds[x + ld*ip] = fs[x + ld*ip];
  const ELEMENT_TYPE v = fds[x + ld*jp] = fs[x + ld*jp];
  if ( fabs(det) <= fabs(a11)*fabs(a22)*1.0e-15 ||
       // the determinant is smaller than round-off errors ->
       // the pivot is considered to be zero
       fabs(det) <= eps*(fabs(a11) + fabs(a22) + fabs(a12))
       // the inverse of the pivot is of the order 1/eps ->
       // the pivot is considered to be zero
     ) {
    if ( max(fabs(u), fabs(v)) <= eps ) { // the off-diagonal is "zero"
      if ( x == ip ) {
        fs[x + ld*ip] = 1.0;
        fs[x + ld*jp] = 0.0;
      }
      else if ( x == jp ) {
        fs[x + ld*ip] = 0.0;
        fs[x + ld*jp] = 1.0;
      }
      else {
        fs[x + ld*ip] = 0.0;
        fs[x + ld*jp] = 0.0;
      }
    }
    else // non-zero off-diagonal element found ->
      return 1; // this column to be delayed
  }
  else if ( fabs(det) <=
            delta*max(fabs(a22*u - a12*v), fabs(a11*v - a12*u)) )
    // pivot too small ->
    return 1; // this column to be delayed
  else { // ok to divide
    fs[x + ld*ip] = (a22*u - a12*v)/det;
    fs[x + ld*jp] = (a11*v - a12*u)/det;
  }
  return 0;
}

/* The next function eliminates the pivoted column from non-pivoted */
template <
  typename ELEMENT_TYPE,
  unsigned int TILE_SIZE,
  unsigned int TILES // = 7 for a single node and = 11 for many nodes
>
__device__ void dev_eliminate_1x1(
  int &x, // row for this thread
  const int y, // column for this thread
  const int ip, // pivoted column
  volatile ELEMENT_TYPE *const fs,
  const int ld,
  const ELEMENT_TYPE p // pivot value
) {
  if ( x != ip )
    fs[x + ld*y] -= p * fs[x + ld*ip];
  x += 2*TILE_SIZE; // move to the next tile pair
  fs[x + ld*y] -= p * fs[x + ld*ip];
  if ( TILES == 11 ) { // several nodes case
    x += 2*TILE_SIZE; // move to the next tile pair
    fs[x + ld*y] -= p * fs[x + ld*ip];
    x += 2*TILE_SIZE; // move to the next tile pair
    fs[x + ld*y] -= p * fs[x + ld*ip];
  }
}

/* The next function eliminates the two pivoted columns from non-pivoted */
template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_eliminate_2x2(
  int &x,
  const int y,
  const int ip,
  const int jp,
  volatile ELEMENT_TYPE *const fs,
  const int ld,
  const ELEMENT_TYPE pi,
  const ELEMENT_TYPE pj
) {
  if ( x != ip && x != jp )
    fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp];
  x += 2*TILE_SIZE; // move to the next tile pair
  fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp];
  if ( TILES == 11 ) { // several nodes case
    x += 2*TILE_SIZE; // move to the next tile pair
    fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp];
    x += 2*TILE_SIZE; // move to the next tile pair
    fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp];
  }
}

/* The next function performs elimination in one tile only */
template< typename ELEMENT_TYPE, unsigned int TILE_SIZE >
inline __device__ void dev_eliminate(
  int &x,
  const int y,
  const int ip,
  const int jp,
  volatile ELEMENT_TYPE *const fs,
  const int ld,
  const ELEMENT_TYPE pi,
  const ELEMENT_TYPE pj
) {
  x += TILE_SIZE;
  if ( ip == jp )
    fs[x + ld*y] -= pi * fs[x + ld*ip];
  else
    fs[x + ld*y] -= pi * fs[x + ld*ip] + pj * fs[x + ld*jp];
}

/* Performs the factorization (LDLT).

   The outline of the factorization algorithm is as follows.

   1. L = A
   2. A diagonal block of L of size 1 or 2 is selected
   3. A division of the corresponding (one or two) columns of L by the
      selected block (pivoting) is considered and is accepted only if the
      elements of the resulting columns are not going to be greater than
      the inverse of the "pivoting threshold" delta; otherwise kernel
      terminates.
   4. If not all columns are pivoted, go to 2.

   Called by cu_block_ldlt and cu_multiblock_ldlt factorization kernels.
*/
template< typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__device__ void dev_block_ldlt(
  const unsigned int block,
  const int nrows, // number of rows of the factorized matrix
  const int ncols, // number of columns thereof
  const int offp, // number of rows above the pivot block
  ELEMENT_TYPE *const a, // array of elements of A
  const int lda, // leading dimension of a
  ELEMENT_TYPE *const f, // array of elements of the L factor
  const int ldf, // leading dimension of f
  ELEMENT_TYPE *const fd, // array of elements of L*D
  const int ldfd, // leading dimension of fd
  ELEMENT_TYPE *const d, // array for main diagonal and subdiagonal of D
  const ELEMENT_TYPE delta, // pivoting threshold
  const ELEMENT_TYPE eps, // zero pivot threshold
  int *const index, // pivot order index
  int *const stat // number of successful pivots
) {
  const int SIZE_X = TILES*TILE_SIZE;

  int ip, jp; // pivot row and col indices
  int x, y; // position indices
  int mx, my; // masks
  ELEMENT_TYPE a11, a12, a22, det; // 2x2 pivot data

  __shared__ volatile ELEMENT_TYPE fs[SIZE_X*TILE_SIZE]; // work array for f
  __shared__ volatile ELEMENT_TYPE fds[SIZE_X*TILE_SIZE]; // work array for fd
  __shared__ volatile ELEMENT_TYPE ds[2*TILE_SIZE]; // work array for d
  __shared__ volatile int mask[TILE_SIZE]; // pivot mask/index
  __shared__ volatile bool not_max[TILE_SIZE]; // flag for finding the largest row elm
  __shared__ volatile int quit; // failure flag
  __shared__ volatile int jps; // pivot column index

  y = threadIdx.y % TILE_SIZE; // fs & fds column processed by this thread

  // load the diagonal and off-diagonal tiles into shared memory
  dev_init_fact< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( block, nrows, ncols, offp, a, lda, fs, ds );

  mx = (threadIdx.x < ncols ? 0 : ncols + 1); // initial pivot index

  // find the largest element in the first row
  dev_init_max< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( ncols, fs, mx, mask, not_max, jps, quit );

  for ( int row = 0, pivoted = 0; row < ncols; ) {

    // select the pivot based on the row's largest element index jps
    ip = row;
    jp = jps;
    dev_select_pivots< ELEMENT_TYPE >
      ( fs, SIZE_X, ip, jp, a11, a12, a22, det );
    __syncthreads();

    if ( threadIdx.y < TILE_SIZE + 4 ) { // the first 3 warps try to pivot
      x = threadIdx.x + TILE_SIZE*threadIdx.y; // fs/fds row to process
      if ( x < SIZE_X && (threadIdx.y || mx == 0 || mx > ncols) ) {
        // elements of the pivot block that should have been
        // zeroed by elimination are ignored
        if ( ip == jp ) { // 1x1 pivot
          if ( dev_1x1_pivot_fails< ELEMENT_TYPE >
               ( x, ip, fs, fds, SIZE_X, det, delta, eps ) )
            quit = 1;
        }
        else { // 2x2 pivot
          if ( dev_2x2_pivot_fails< ELEMENT_TYPE >
               ( x, ip, jp, fs, fds, SIZE_X,
                 a11, a12, a22, det, delta, eps ) )
            quit = 1;
        }
      }
    }
    else {
      // meanwhile, one thread of the fourth warp is inverting the pivot
      if ( threadIdx.x == 0 && threadIdx.y == TILE_SIZE + 4 ) {
        mask[ip] = pivoted + 1; // assume pivot is ok for now
        if ( ip == jp ) {
          if ( fabs(det) > eps )
            ds[2*pivoted] = 1.0/det; // ok to invert
        }
        else {
          mask[jp] = pivoted + 2; // assume pivot is ok for now
          if ( fabs(det) > fabs(a11)*fabs(a22)*1.0e-15 &&
               fabs(det) > eps*(fabs(a11) + fabs(a22) + fabs(a12)) ) {
            ds[2*pivoted ] = a22/det;
            ds[2*pivoted + 1] = -a12/det;
            ds[2*pivoted + 2] = a11/det;
          }
        }
        if ( atomicMin(&stat[0], ncols) <= pivoted )
          quit = 1; // some other CUDA block failed to pivot this column
      }
    } // warp fork ends here
    __syncthreads();

    if ( quit ) {
      if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
        atomicMin(&stat[0], pivoted); // record the failure in stat[0]
        // column(s) should not be saved - mark as non-processed
        mask[ip] = 0;
        if ( ip != jp )
          mask[jp] = 0;
      }
      __syncthreads();
      break; // done
    }

    // update successful pivots count
    if ( ip == jp )
      pivoted++;
    else
      pivoted += 2;

    // find next pivot row to process
    if ( ip == row )
      row++; // move forward only if this row participated in pivoting
    while ( row < ncols && mask[row] )
      row++; // skip processed rows (parts of previous 2x2 pivots)

    // eliminate the recently pivoted column(s) from the rest

    // first row to be processed by this thread
    x = threadIdx.x + (threadIdx.y/TILE_SIZE)*TILE_SIZE;

    mx = mask[threadIdx.x];
    my = mask[y];

    // process the first (TILES - 3) tiles right away;
    // the even tiles are processed by the first two warps,
    // the odd by the other two
    if ( ip == jp ) {
      a11 = fs[ip + SIZE_X*y];
      if ( my == 0 )
        dev_eliminate_1x1< ELEMENT_TYPE, TILE_SIZE, TILES >
          ( x, y, ip, fs, SIZE_X, a11 );
    }
    else {
      a11 = fs[ip + SIZE_X*y];
      a12 = fs[jp + SIZE_X*y];
      if ( my == 0 )
        dev_eliminate_2x2< ELEMENT_TYPE, TILE_SIZE, TILES >
          ( x, y, ip, jp, fs, SIZE_X, a11, a12 );
    }

    // from here on, the first two warps deal with finding the largest element
    // in the next pivot row, while the other two continue elimination
    // in the remaining three tiles
    if ( threadIdx.y < TILE_SIZE ) {
      if ( row < ncols && threadIdx.y == 0 ) {
        not_max[threadIdx.x] = mx; // mask away processed elements
        if ( threadIdx.x == 0 )
          jps = TILE_SIZE; // initialise the largest element column index
      }
    }
    else { // do elimination in the (TILES - 2)-th tile
      if ( my == 0 )
        dev_eliminate< ELEMENT_TYPE, TILE_SIZE >
          ( x, y, ip, jp, fs, SIZE_X, a11, a12 );
    }
    __syncthreads();

    if ( threadIdx.y < TILE_SIZE ) {
      // mark elements in the pending row that cannot be largest
      if ( row < ncols ) { // check the element in column threadIdx.x
        if ( threadIdx.x != threadIdx.y && mx == 0 && my == 0 &&
             fabs(fs[row + SIZE_X*threadIdx.x]) <
             fabs(fs[row + SIZE_X*threadIdx.y]) )
          not_max[threadIdx.x] = 1; // no good: a larger value exists elsewhere
      }
    }
    else { // do elimination in the (TILES - 1)-th tile
      if ( my == 0 )
        dev_eliminate< ELEMENT_TYPE, TILE_SIZE >
          ( x, y, ip, jp, fs, SIZE_X, a11, a12 );
    }
    __syncthreads();

    if ( threadIdx.y < TILE_SIZE ) {
      // select leftmost largest element in the row
      if ( row < ncols ) {
        if ( threadIdx.y == 0 && not_max[threadIdx.x] == 0 )
          atomicMin((int*)&jps, threadIdx.x);
          // in case of a tie, choose the leftmost
      }
    }
    else { // do elimination in the (TILES)-th tile
      if ( my == 0 )
        dev_eliminate< ELEMENT_TYPE, TILE_SIZE >
          ( x, y, ip, jp, fs, SIZE_X, a11, a12 );
    }
    __syncthreads();

  } // for loop through pivot rows ends here

  my = mask[y];

  // update successful pivot ordering in index;
  // if this CUDA block failed to pivot the part of column threadIdx.y of A
  // delegated to it, then possible successful pivoting of its other parts
  // by other blocks is canceled by zeroing index[threadIdx.y];
  // if some other part of this column is unsuccessful, index[threadIdx.y]
  // remains zero
  if ( threadIdx.x == 0 && threadIdx.y < ncols )
    atomicMin(&index[threadIdx.y], my);

  // save L and D factors and LD
  dev_save_fact< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( block, nrows, ncols, offp, my, fs, fds, ds, f, ldf, fd, ldfd, d );
}

template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__global__ void cu_block_ldlt(
  const int nrows, // n.o. rows in A
  const int ncols, // n.o. cols in A (<= TILE_SIZE)
  const int offp, // n.o. rows in A_u
  ELEMENT_TYPE *const a, // array of A's elements
  const int lda, // leading dimension of a
  ELEMENT_TYPE *const f, // array of L's elements
  const int ldf, // leading dimension of f
  ELEMENT_TYPE *const fd, // array of (L*D)'s elements
  const int ldfd, // leading dimension of fd
  ELEMENT_TYPE *const d, // array of D**(-1)'s diagonal and subdiagonal elements
  const ELEMENT_TYPE delta, // pivoting threshold
  const ELEMENT_TYPE eps, // zero column threshold:
                          // the column is zeroed if all elements are <= eps
  int *const index, // pivot index (cf. permutation matrix P)
  int *const stat // n.o. successful pivots
) {
  dev_block_ldlt< ELEMENT_TYPE, TILE_SIZE, TILES >
    ( blockIdx.x, nrows, ncols, offp, a, lda, f, ldf, fd, ldfd, d,
      delta, eps, index, stat );
  return;
}

// Same as cu_block_fact but for several A's of different size simultaneously
//
// Called by multinode_ldlt factorization subroutine.
//
template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES >
__global__ void cu_multiblock_ldlt(
  struct multiblock_fact_type *mbfdata, // factorization data
  ELEMENT_TYPE *f, // same for L
  const ELEMENT_TYPE delta, // same as in cu_block_fact
  const ELEMENT_TYPE eps, // same as in cu_block_fact
  int *const index, // array of all pivot indices
  int *const stat // array of successful pivots' numbers
) {
  /*
   * Read information on what to do from global memory
   */
  mbfdata += blockIdx.x; // shift to the data for this CUDA block
  int ncols = mbfdata->ncols; // n.o. cols in A processed by this CUDA block
  if ( ncols < 1 )
    return;
  int nrows = mbfdata->nrows; // n.o. rows in A
  int lda = mbfdata->ld; // leading dimension of A
  int p = mbfdata->p; // n.o. rows in A_u
  int node = mbfdata->node; // A's number
  int block = mbfdata->offb; // relative CUDA block index
  f += mbfdata->offf; // shift to the array of this L elements
  double *fd = mbfdata->ldptr;
  double *a = mbfdata->aptr; // pointer to A
  double *d = mbfdata->dptr; // pointer to D**(-1)
  // NOTE(review): dev_block_ldlt is instantiated with double here rather
  // than ELEMENT_TYPE, so this kernel is effectively double-only — confirm
  // this is intended.
  dev_block_ldlt < double, TILE_SIZE, TILES >
    ( block, nrows, ncols, p, a, lda, f, lda, fd, lda, d,
      delta, eps, &index[node*TILE_SIZE], &stat[node]);
}

/* LDLT factorization kernel for the root delays block.

   The columns which the above factorization kernels failed to pivot are
   delayed, ie left unchanged, until some other columns in the same node
   are successfully pivoted, after which pivoting of delayed columns is
   attempted again. When a factorization subroutine terminates, generally
   there still may be delayed columns which this subroutine cannot possibly
   pivot, and they are passed on to the parent node in the elimination tree.
At the root node, however, this is not possible, and a special kernel given below is applied to delayed columns, which together with the respective rows now form a square block at the lower left corner of the root node matrix. The main difference between the factorization kernel below and those above is that the pivot is sought in the whole matrix because, in the above notation, blocks A_u and A_l are no longer present. Since this matrix may be too large to fit into shared memory, the kernel below works mostly in the global memory (shared memory is only used for finding the largest element of a column). */ template< typename ELEMENT_TYPE > __global__ void cu_square_ldlt( const int n, ELEMENT_TYPE *const a, // A on input, L on output ELEMENT_TYPE *const f, // L ELEMENT_TYPE *const w, // L*D ELEMENT_TYPE *const d, // main diag and subdiag of the inverse of D const int ld, // leading dimension of a, f, w const ELEMENT_TYPE delta, // same as above const ELEMENT_TYPE eps, // same as above int *const ind, // same as in cu_block_fact int *const stat // same as in cu_block_fact ) { int x, y; int col; int ip, jp; int pivoted, recent; ELEMENT_TYPE a11, a12, a22, det; volatile ELEMENT_TYPE *work = (volatile ELEMENT_TYPE*)SharedMemory; // work array volatile int *const iwork = (volatile int*)&(work[blockDim.x]); // integer work array volatile int *const iw = (volatile int*)&(iwork[blockDim.x]); // iw[0]: failure flag, // iw[1]: largest col. elem. index for ( x = threadIdx.x; x < n; x += blockDim.x ) { ind[x] = 0; // initialize pivot index/processed columns mask for ( y = 0; y < n; y++ ) f[x + ld*y] = a[x + ld*y]; // copy A to L } for ( x = threadIdx.x; x < 2*n; x += blockDim.x ) d[x] = 0.0; // initialize D __syncthreads(); pivoted = 0; // n.o. pivoted cols for ( int pass = 0; ; pass++ ) { // failed cols are skipped until next pass recent = 0; // n.o. 
cols pivoted during this pass for ( col = 0; col < n; ) { if ( ind[col] ) { col++; // already pivoted, move on continue; } if ( threadIdx.x == 0 ) iw[0] = 0; // initialize failure flag __syncthreads(); // find the largest element in the pending column // // first, each thread finds its candidate for the largest one a11 = -1.0; y = -1; for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] == 0 ) { a12 = fabs(f[x + ld*col]); if ( a12 >= a11 ) { a11 = a12; y = x; } } } work[threadIdx.x] = a11; // the largest one for this thread iwork[threadIdx.x] = y; // its index __syncthreads(); // now first 8 threads reduce the number of candidates to 8 if ( threadIdx.x < 8 ) { for ( x = threadIdx.x + 8; x < blockDim.x; x += 8 ) if ( iwork[x] >= 0 && work[x] > work[threadIdx.x] ) { work[threadIdx.x] = work[x]; iwork[threadIdx.x] = iwork[x]; } } __syncthreads(); // the first thread finds the largest element and its index if ( threadIdx.x == 0 ) { y = 0; for ( x = 1; x < 8 && x < blockDim.x; x++ ) if ( iwork[x] >= 0 && (iwork[y] < 0 || work[x] > work[y]) ) y = x; iw[1] = iwork[y]; // the largest element index } __syncthreads(); // select the pivot based on the largest element index ip = col; jp = iw[1]; dev_select_pivots_at_root< ELEMENT_TYPE > ( f, ld, ip, jp, a11, a12, a22, det ); // try to pivot if ( ip == jp ) { // 1x1 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_1x1_pivot_fails< ELEMENT_TYPE > ( x, ip, f, w, ld, det, delta, eps ) ) iw[0] = 1; } else { // 2x2 pivot for ( x = threadIdx.x; x < n; x += blockDim.x ) if ( ind[x] == 0 ) if ( dev_2x2_pivot_fails< ELEMENT_TYPE > ( x, ip, jp, f, w, ld, a11, a12, a22, det, delta, eps ) ) iw[0] = 1; } __syncthreads(); if ( iw[0] ) { // pivot failed, restore the failed column(s) for ( x = threadIdx.x; x < n; x += blockDim.x ) { if ( ind[x] ) continue; f[x + ld*ip] = w[x + ld*ip]; if ( ip != jp ) f[x + ld*jp] = w[x + ld*jp]; } __syncthreads(); col++; // move on continue; } if ( threadIdx.x == 0 ) { 
// mark pivoted columns and invert the pivot if possible ind[ip] = pivoted + 1; if ( ip == jp ) { if ( fabs(det) > eps ) // ok to invert d[2*pivoted] = 1.0/det; } else { ind[jp] = pivoted + 2; if ( fabs(det) > fabs(a11)*fabs(a22)*1.0e-15 && fabs(det) > eps*(fabs(a11) + fabs(a22) + fabs(a12)) ) { // ok to invert d[2*pivoted ] = a22/det; d[2*pivoted + 1] = -a12/det; d[2*pivoted + 2] = a11/det; } } } __syncthreads(); // update pivot counters if ( ip == jp ) { pivoted++; recent++; } else { pivoted += 2; recent += 2; } // eliminate pivoted columns from non-processed if ( ip == jp ) { for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) if ( x != ip && ind[y] == 0 ) f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y]; } else { for ( x = threadIdx.x; x < n; x += blockDim.x ) { for ( y = 0; y < n; y++ ) { if ( x != ip && x != jp && ind[y] == 0 ) { f[x + ld*y] -= f[x + ld*ip] * f[ip + ld*y] + f[x + ld*jp] * f[jp + ld*y]; } } } } __syncthreads(); if ( ip == col ) // this column is pivoted, move on col++; } // loop across columns if ( pivoted == n // all done || recent == 0 ) // no pivotable columns left break; } // pass if ( threadIdx.x == 0 ) stat[0] = pivoted; if ( pivoted < n ) // factorization failed return; // copy L to A for ( x = threadIdx.x; x < n; x += blockDim.x ) for ( y = 0; y < n; y++ ) a[ind[x] - 1 + ld*(ind[y] - 1)] = f[x + ld*y]; } template < typename ELEMENT_TYPE, unsigned int TILE_SIZE, unsigned int TILES > __global__ void cu_multiblock_chol( struct multiblock_fact_type *mbfdata, ELEMENT_TYPE *f, // array of L nodes int *stat // execution status ) { /* * Read information on what to do from global memory */ mbfdata += blockIdx.x; int ncols = mbfdata->ncols; if ( ncols < 1 ) return; int nrows = mbfdata->nrows; int ld = mbfdata->ld; int node = mbfdata->node; int block = mbfdata->offb; ELEMENT_TYPE *const a = mbfdata->aptr; f += mbfdata->offf; stat += node; dev_block_chol< ELEMENT_TYPE, TILE_SIZE, TILES > ( block, nrows, ncols, a, ld, f, ld, stat ); 
}

/* Per-node pivot information consumed by cu_collect_stats:
 * nelim - number of eliminated variables in this node,
 * dval  - packed D factor, stored as pairs (d[2*i], d[2*i+1]) per pivot. */
struct cstat_data_type {
   int nelim;
   double *dval;
};

/* Scans the D factor of one node (one block per node, single thread per
 * block) and accumulates inertia statistics into the global counters:
 * zero pivots, negative eigenvalues and the number of 2x2 pivots. */
__global__ void cu_collect_stats(
      const struct cstat_data_type *csdata,
      struct cuda_stats *const stats
) {
   // Designed to be run with a single thread
   csdata += blockIdx.x; // each block handles one node's data
   double *const d = csdata->dval;
   const int nelim = csdata->nelim;
   int num_zero = 0;
   int num_neg = 0;
   int num_two = 0;
   for (int i = 0; i < nelim; ) {
      const double a11 = d[2*i];
      const double a21 = d[2*i + 1];
      if ( a21 == 0.0 ) {
         // 1x1 pivot (can be a zero pivot)
         if ( a11 == 0 ) num_zero++;
         if ( a11 < 0 ) num_neg++;
         i++;
      }
      else {
         // 2x2 pivot (can't be a zero pivot)
         const double a22 = d[2*(i + 1)];
         num_two++;
         // To check for negative eigenvalues, we exploit
         //    det   = product of evals
         //    trace = sum of evals
         // if det is negative, exactly one eval is negative;
         // otherwise, both have same sign, equal to sign of trace
         const double det = a11*a22 - a21*a21;
         const double trace = a11 + a22;
         if ( det < 0 ) num_neg++;
         else if ( trace < 0 ) num_neg += 2;
         i += 2;
      }
   }
   // Fold local tallies into the shared counters; atomics are required
   // because many blocks (nodes) update the same stats object.
   if ( num_neg > 0 ) atomicAdd(&(stats->num_neg), num_neg);
   if ( num_zero > 0 ) atomicAdd(&(stats->num_zero), num_zero);
   if ( num_two > 0 ) atomicAdd(&(stats->num_two), num_two);
}

} /* anon namespace */

/*******************************************************************************
 * Following routines are exported with C binding so can be called from Fortran
 ******************************************************************************/

extern "C" {

/* Launches the partial LDL^T factorization of an nrows x ncols block column:
 * initializes the status/permutation arrays, then runs cu_block_ldlt over
 * the row blocks.  All kernels are enqueued on *stream (asynchronous). */
void spral_ssids_block_ldlt(
      cudaStream_t *stream,
      int nrows, int ncols, int p,
      double* a, int lda,
      double* f, int ldf,
      double* fd, int ldfd,
      double* d,
      double delta, double eps,
      int* index, int* stat
) {
   // one thread block per BLOCK_SIZE*(BLOCKS - 1) rows below the pivot block
   int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(BLOCKS - 1)) + 1;
   cu_block_ldlt_init<<< 1, BLOCK_SIZE, 0, *stream >>>( ncols, stat, index );
   dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE);
   cu_block_ldlt
      < double, BLOCK_SIZE, BLOCKS >
      <<< nblocks, threads, 0, *stream >>>
      ( nrows, ncols, p, a, lda, f, ldf, fd, ldfd, d, delta, eps,
        index, stat );
}

void
spral_ssids_block_llt(
      cudaStream_t *stream,
      int nrows, int ncols,
      double* a, int lda,
      double* f, int ldf,
      int* stat
) {
   // Cholesky (LL^T) factorization of an nrows x ncols block column;
   // dynamic shared memory holds CBLOCKS square tiles of doubles.
   int smsize = CBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double);
   int nblocks = (nrows - ncols - 1)/(BLOCK_SIZE*(CBLOCKS - 1)) + 1;
   dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
   cu_block_chol
      < double, BLOCK_SIZE, CBLOCKS >
      <<< nblocks, threads, smsize, *stream >>>
      ( nrows, ncols, a, lda, f, ldf, stat );
}

/* Gathers inertia statistics for nblk nodes.  Each kernel launch handles at
 * most MAX_CUDA_BLOCKS nodes, one block (single thread) per node. */
void spral_ssids_collect_stats(cudaStream_t *stream, int nblk,
      const struct cstat_data_type *csdata, struct cuda_stats *stats) {
   for(int i=0; i<nblk; i+=MAX_CUDA_BLOCKS) {
      int nb = min(MAX_CUDA_BLOCKS, nblk-i);
      cu_collect_stats <<<nb, 1, 0, *stream>>> (csdata+i, stats);
      CudaCheckError();
   }
}

/* Multi-block LDL^T factorization driver: each CUDA block factorizes the
 * block described by one multiblock_fact_type entry; launches are batched
 * so no single launch exceeds MAX_CUDA_BLOCKS blocks. */
void spral_ssids_multiblock_ldlt(
      cudaStream_t *stream,
      int nblocks,
      struct multiblock_fact_type *mbfdata,
      double* f,
      double delta, double eps,
      int* index, int* stat
) {
   dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE);
   for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
      int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
      cu_multiblock_ldlt
         < double, BLOCK_SIZE, MBLOCKS >
         <<< nb, threads, 0, *stream >>>
         ( mbfdata + i, f, delta, eps, index, stat );
   }
}

/* Prepares per-block factorization descriptors (mbfdata) for the next
 * multi-block LDL^T step; ind receives block_size permutation entries per
 * node, ncb the number of blocks produced. */
void spral_ssids_multiblock_ldlt_setup(
      cudaStream_t *stream,
      int nblocks,
      struct multinode_fact_type *ndata,
      struct multiblock_fact_type *mbfdata,
      int step, int block_size, int blocks,
      int* stat, int* ind, int* ncb
) {
   dim3 threads(10,8); // grid of 10x8 threads per node, fixed by the kernel
   for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
      int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
      cu_multiblock_fact_setup
         <<< nb, threads, 0, *stream >>>
         ( ndata + i, mbfdata, step, block_size, blocks, i,
           stat + i, ind + block_size*i, ncb );
   }
}

/* Multi-block Cholesky driver; mirrors spral_ssids_multiblock_ldlt but for
 * the positive-definite (LL^T) kernel, which needs shared-memory tiles. */
void spral_ssids_multiblock_llt(
      cudaStream_t *stream,
      int nblocks,
      struct multiblock_fact_type *mbfdata,
      double* f,
      int* stat
) {
   if ( nblocks < 1 )
      return; // nothing to factorize
   int smsize = MCBLOCKS*BLOCK_SIZE*BLOCK_SIZE*sizeof(double);
   dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
   for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
      int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
cu_multiblock_chol
         < double, BLOCK_SIZE, MCBLOCKS >
         <<< nb, threads, smsize, *stream >>>
         ( mbfdata + i, f, stat );
   }
}

/* Prepares per-block descriptors for the next multi-block Cholesky step.
 * Same kernel as the LDLT setup, but no permutation output (ind == 0). */
void spral_ssids_multiblock_llt_setup(
      cudaStream_t *stream,
      int nblocks,
      struct multinode_fact_type *ndata,
      struct multiblock_fact_type *mbfdata,
      int step, int block_size, int blocks,
      int* stat, int* ncb
) {
   dim3 threads(16,8); // grid of 16x8 threads per node, fixed by the kernel
   for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) {
      int nb = min(MAX_CUDA_BLOCKS, nblocks - i);
      cu_multiblock_fact_setup
         <<< nb, threads, 0, *stream >>>
         ( ndata + i, mbfdata, step, block_size, blocks, i,
           stat + i, 0, ncb );
   }
}

/* Single-block LDL^T factorization of a dense n x n matrix a (leading
 * dimension ld), with workspace f/w and pivot output d.  Launched as one
 * block of up to 256 threads; dynamic shared memory holds one double per
 * thread plus (nt + 2) ints for the reduction indices and flags. */
void spral_ssids_square_ldlt(
      cudaStream_t *stream,
      int n,
      double* a,
      double* f,
      double* w,
      double* d,
      int ld,
      double delta, double eps,
      int* index, int* stat
) {
   int nt = min(n, 256);
   int sm = nt*sizeof(double) + (nt + 2)*sizeof(int);
   cu_square_ldlt< double ><<< 1, nt, sm, *stream >>>
      ( n, a, f, w, d, ld, delta, eps, index, stat );
}

} // end extern "C"
the_stack
using namespace std;
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <iomanip>
#include <cstdlib>
#include <vector>

// cuda
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cublasLt.h>

#define Value 127

// Check a CUDA runtime API call and abort on failure.
// FIX: the call (F) is now evaluated exactly once into a temporary and the
// message reports that call's own status; the old macro printed
// cudaGetErrorString(cudaGetLastError()), which can describe an earlier,
// unrelated (sticky) error rather than the failing call.
#define checkCudaAPIErrors(F) do {                                          \
    cudaError_t cuda_check_err_ = (F);                                      \
    if (cuda_check_err_ != cudaSuccess)                                     \
    {   printf("Error at line %d in file %s: %s\n",                         \
               __LINE__, __FILE__, cudaGetErrorString(cuda_check_err_));    \
        exit(-1); }                                                         \
} while (0)

// Map a cublasStatus_t to its symbolic name for diagnostics.
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
    switch (error)
    {
        case CUBLAS_STATUS_SUCCESS:
            return "CUBLAS_STATUS_SUCCESS";
        case CUBLAS_STATUS_NOT_INITIALIZED:
            return "CUBLAS_STATUS_NOT_INITIALIZED";
        case CUBLAS_STATUS_ALLOC_FAILED:
            return "CUBLAS_STATUS_ALLOC_FAILED";
        case CUBLAS_STATUS_INVALID_VALUE:
            return "CUBLAS_STATUS_INVALID_VALUE";
        case CUBLAS_STATUS_ARCH_MISMATCH:
            return "CUBLAS_STATUS_ARCH_MISMATCH";
        case CUBLAS_STATUS_MAPPING_ERROR:
            return "CUBLAS_STATUS_MAPPING_ERROR";
        case CUBLAS_STATUS_EXECUTION_FAILED:
            return "CUBLAS_STATUS_EXECUTION_FAILED";
        case CUBLAS_STATUS_INTERNAL_ERROR:
            return "CUBLAS_STATUS_INTERNAL_ERROR";
        case CUBLAS_STATUS_NOT_SUPPORTED:
            return "CUBLAS_STATUS_NOT_SUPPORTED";
        case CUBLAS_STATUS_LICENSE_ERROR:
            return "CUBLAS_STATUS_LICENSE_ERROR";
    }
    return "<unknown>";
}

// Check a cuBLAS call and abort on failure.
// FIX: the old macro expanded F twice -- once in the comparison and again
// inside _cudaGetErrorEnum(F) -- so on the error path the failing cuBLAS
// call was executed a second time just to format the message.  The argument
// is now captured once in a temporary.
#define checkcuBlasError(F) do {                                            \
    cublasStatus_t cublas_check_stat_ = (F);                                \
    if (cublas_check_stat_ != CUBLAS_STATUS_SUCCESS)                        \
    {   printf("Error at line %d in file %s: %s\n",                         \
               __LINE__, __FILE__, _cudaGetErrorEnum(cublas_check_stat_));  \
        exit(-1); }                                                         \
} while (0)

// Round v up to the nearest multiple of d.
int roundoff(int v, int d) {
   return ((v+d-1)/d) * d;
}

// Debug helper: sequentially fills 'size' bytes with 'val'.  Every launched
// thread writes the whole range, so this is presumably meant for a <<<1,1>>>
// launch (see the commented-out call site below) -- TODO confirm.
__global__ void memKernel(int8_t * in, int8_t val, int size)
{
    for(int i = 0; i < size; i++)
        in[i] = val;
}

// Runs an INT8 (INT32-accumulate) tensor-op IGEMM via cublasLtMatmul:
// transforms A/B/C into the tensor-core memory orders (COL32 /
// COL4_4R2_8C), times 'iters' matmul launches with CUDA events, and
// returns the average per-iteration time in time_matmul (milliseconds).
// Returns 0 on success, 1 otherwise.
int ltIgemmTensor(cublasLtHandle_t ltHandle,
                  int m, int n, int k,
                  const int8_t *A, int lda,
                  const int8_t *B, int ldb,
                  int32_t *C, int ldc,
                  int iters, float &time_matmul)
{
    cublasStatus_t cublasStat = CUBLAS_STATUS_SUCCESS;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cublasLtMatmulDesc_t matmulDesc;
    cublasLtMatrixLayout_t Adesc = NULL;
    cublasLtMatrixLayout_t Bdesc = NULL;
cublasLtMatrixLayout_t Cdesc = NULL; int32_t alpha = 1; int32_t beta = 0; cublasOperation_t opTranspose = CUBLAS_OP_T; // The tensor op igemm kernels require specialized memory order of data cublasLtMatrixTransformDesc_t transformDesc = NULL; int8_t *Atransform = NULL; int8_t *Btransform = NULL; int32_t *Ctransform = NULL; cublasLtMatrixLayout_t AtransformDesc = NULL; cublasLtMatrixLayout_t BtransformDesc = NULL; cublasLtMatrixLayout_t CtransformDesc = NULL; float transformAlpha = 1.0f; float transformBeta = 0.0f; cublasLtOrder_t order_COL32 = CUBLASLT_ORDER_COL32; cublasLtOrder_t order_COL4_4R2_8C = CUBLASLT_ORDER_COL4_4R2_8C; int ldaTransform = 32 * m; int ldbTransform = 32 * roundoff(n,8); int ldcTransform = 32 * m; checkCudaAPIErrors(cudaMalloc((void **)&Atransform, sizeof(int8_t ) * roundoff(k, 32)/32*ldaTransform)); checkCudaAPIErrors(cudaMalloc((void **)&Btransform, sizeof(int8_t ) * roundoff(k, 32)/32*ldbTransform)); checkCudaAPIErrors(cudaMalloc((void **)&Ctransform, sizeof(int32_t )* roundoff(n, 32)/32*ldcTransform)); // create transformDesc checkcuBlasError(cublasLtMatrixTransformDescCreate(&transformDesc, CUDA_R_32F)); // create matmulDesc checkcuBlasError(cublasLtMatmulDescCreate(&matmulDesc, CUDA_R_32I)); // Tensor op igemm kernels only support NT gemm checkcuBlasError(cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_TRANSB, &opTranspose, sizeof(cublasOperation_t))); // Create descriptors for the original matrices checkcuBlasError(cublasLtMatrixLayoutCreate(&Adesc, CUDA_R_8I, m, k, lda)); // B or transposed B checkcuBlasError(cublasLtMatrixLayoutCreate(&Bdesc, CUDA_R_8I, n, k, ldb)); checkcuBlasError(cublasLtMatrixLayoutCreate(&Cdesc, CUDA_R_32I, m, n, ldc)); // Create descriptors for the transformed matrices checkcuBlasError(cublasLtMatrixLayoutCreate(&AtransformDesc, CUDA_R_8I, m, k, ldaTransform)); checkcuBlasError(cublasLtMatrixLayoutSetAttribute(AtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL32, sizeof(order_COL32))); 
checkcuBlasError(cublasLtMatrixLayoutCreate(&BtransformDesc, CUDA_R_8I, n, k, ldbTransform)); checkcuBlasError(cublasLtMatrixLayoutSetAttribute(BtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL4_4R2_8C, sizeof(order_COL4_4R2_8C))); checkcuBlasError(cublasLtMatrixLayoutCreate(&CtransformDesc, CUDA_R_32I, m, n, ldcTransform)); checkcuBlasError(cublasLtMatrixLayoutSetAttribute(CtransformDesc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order_COL32, sizeof(order_COL32))); cublasLtMatmulPreference_t preference = NULL; int returnedResults = 0; cublasLtMatmulHeuristicResult_t heuristicResult = {}; checkcuBlasError(cublasLtMatmulPreferenceCreate(&preference)); checkcuBlasError(cublasLtMatmulAlgoGetHeuristic( ltHandle, matmulDesc, AtransformDesc, BtransformDesc, CtransformDesc, CtransformDesc, preference, 1, &heuristicResult, &returnedResults)); cublasLtMatmulTile_t tileSize = CUBLASLT_MATMUL_TILE_128x256; cublasLtMatmulAlgoConfigSetAttribute( &heuristicResult.algo, CUBLASLT_ALGO_CONFIG_TILE_ID, &tileSize, sizeof(cublasLtMatmulTile_t)); if (returnedResults == 0) { checkcuBlasError(CUBLAS_STATUS_NOT_SUPPORTED); } checkcuBlasError(cublasLtMatrixTransform(ltHandle, transformDesc, &transformAlpha, A, Adesc, &transformBeta, NULL, NULL, Atransform, AtransformDesc, 0)); checkcuBlasError(cublasLtMatrixTransform(ltHandle, transformDesc, &transformAlpha, B, Bdesc, &transformBeta, NULL, NULL, Btransform, BtransformDesc, 0)); //for(int i = 0; i < 1; i++) { // memKernel<<<1,1,0,0>>>(Atransform, 0, 2000); // } cudaEventRecord(start, 0); for (int i=0; i<iters; i++) { checkcuBlasError(cublasLtMatmul(ltHandle, matmulDesc, &alpha, Atransform, AtransformDesc, Btransform, BtransformDesc, &beta, Ctransform, CtransformDesc, Ctransform, CtransformDesc, &heuristicResult.algo, NULL, 0, 0)); } cudaEventRecord(stop, 0); checkcuBlasError(cublasLtMatrixTransform(ltHandle, transformDesc, &transformAlpha, Ctransform, CtransformDesc, &transformBeta, NULL, NULL, C, Cdesc, 0)); //cudaEventSynchronize(stop); 
cudaDeviceSynchronize();

    // Average event time over the timed matmul iterations.
    time_matmul=0.0;
    cudaEventElapsedTime(&time_matmul, start, stop);
    time_matmul /= iters;

    // Descriptors are no longer needed as all GPU work was already enqueued.
    if (CtransformDesc) cublasLtMatrixLayoutDestroy(CtransformDesc);
    if (BtransformDesc) cublasLtMatrixLayoutDestroy(BtransformDesc);
    if (AtransformDesc) cublasLtMatrixLayoutDestroy(AtransformDesc);
    if (Cdesc) cublasLtMatrixLayoutDestroy(Cdesc);
    if (Bdesc) cublasLtMatrixLayoutDestroy(Bdesc);
    if (Adesc) cublasLtMatrixLayoutDestroy(Adesc);
    if (matmulDesc) cublasLtMatmulDescDestroy(matmulDesc);
    if (transformDesc) cublasLtMatrixTransformDescDestroy(transformDesc);

    // Wait until device is done before freeing transformed buffers
    cudaDeviceSynchronize();
    if (Ctransform) cudaFree(Ctransform);
    if (Btransform) cudaFree(Btransform);
    if (Atransform) cudaFree(Atransform);

    // FIX: the timing events were created at function entry but never
    // destroyed -- a per-call resource leak, and this function is invoked
    // once per (m, n, k) problem size by the benchmark driver.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return cublasStat == CUBLAS_STATUS_SUCCESS ? 0 : 1;
}

// initialize matrix in column-major
// Fills the rows x cols matrix p (leading dimension ld) with values in
// [0, 9].  NOTE(review): srand(time(NULL)) is called on every invocation,
// so calls within the same second produce identical matrices; harmless for
// a benchmark, but reseed once in main() if distinct data matters.
void matInit(int rows, int cols, int8_t *p, int ld)
{
    srand(time(NULL));
    for (int c=0; c<cols; c++) {
        for (int r=0; r<rows; r++) {
            int index = r + c * ld;
            p[index] = rand()%10;
            //p[index] = 0;
        }
    }
}

// Debug print of a column-major matrix (no separators or newlines).
void matDisplay(int rows, int cols, int8_t *p, int ld)
{
    for (int c=0; c<cols; c++) {
        for (int r=0; r<rows; r++) {
            int index = r + c * ld;
            printf("%4d", p[index]);
        }
    }
}

// mat is column-major
// matT is row-major
// Writes the transpose of the rows x cols matrix 'mat' into 'matT'
// (equivalently: converts column-major storage to row-major).
void transpose(int8_t *matT, int8_t *mat, int rows, int cols)
{
    for (int c=0; c<cols; c++) {
        for (int r=0; r<rows; r++) {
            int indexIn  = r + c*rows;
            int indexOut = c + r*cols;
            matT[indexOut] = mat[indexIn];
        }
    }
}

// CPU reference GEMM: C (m x n) = A (m x k) * B (k x n), all column-major
// with leading dimensions lda/ldb/ldc, int8 inputs, int32 accumulation.
void matMul(int m, int n, int k,
            const int8_t *A, int lda,
            const int8_t *B, int ldb,
            int32_t *C, int ldc)
{
    int32_t sum;
    for (int c=0; c<n; c++) {
        for (int r=0; r<m; r++) {
            sum = 0;
            for (int kk=0; kk<k; kk++) {
                int idxA = kk*lda + r;  // A[r][kk]
                int idxB = c*ldb + kk;  // B[kk][c]
                sum += A[idxA] * B[idxB];
            }
            C[c*ldc + r] = sum;         // C[r][c]
        }
    }
}

// Compares the GPU result against the CPU reference and reports the first
// mismatch found in each column ('ms' is accepted but unused here).
void postprocess(const int32_t *ref, const int32_t *res, int m, int n, int k, float ms)
{ for (int c=0; c<n; c++) { for (int r=0; r<m; r++) { int index = r + c*m; if (ref[index] != res[index]) { printf("(row = %d, col = %d) gpu result=%d cpu ref=%d ", r, c, res[index], ref[index]); printf("%25s\n", "*** FAILED ***"); break; } } } } double cal_tflops(int m, int n, int k, double msec) { double flops = 2. * m * n * k; double tflops = (1E-12*flops) / (1E-3*msec); return tflops; } void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){ float tflops = cal_tflops(m, n, k, cublasTime); if (tflops > s_max_tflops){ s_max_tflops = tflops; s_max_m_n = m; s_max_k = k; } cout << setw(7) << m << ","; cout << setw(7) << n << ","; cout << setw(7) << k << ","; cout << setw(15) << setprecision(4) << cublasTime << ","; cout << setw(15) << setprecision(4) << tflops << "," << endl; } void calINT8Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int iters){ int devID = 0; int8_t *h_A = NULL; // m * k, stored in column-major int8_t *h_B = NULL; // k * n, stored in column-major int8_t *h_BT = NULL; // k * n, stored in column-major int32_t *h_C = NULL; // m * n, stored in column-major int32_t *h_Cres = NULL; // m * n, stored in column-major int8_t *d_A = NULL; // m * k, stored in column-major int8_t *d_B = NULL; // k * n, stored in column-major int8_t *d_BT= NULL; // k * n, stored in column-major int32_t *d_C = NULL; // m * n, stored in column-major // allocate memory h_A = (int8_t* )malloc(sizeof(int8_t ) * m * k); if (!h_A) printf("falied to allocate mem on CPU"); h_B = (int8_t* )malloc(sizeof(int8_t ) * k * n); // B : k*n if (!h_B) printf("falied to allocate mem on CPU"); // BT: n*k, the transpose of B h_BT= (int8_t* )malloc(sizeof(int8_t ) * n * k); if (!h_BT) printf("falied to allocate mem on CPU"); h_C = (int32_t*)malloc(sizeof(int32_t) * m * n); if (!h_C) printf("falied to allocate mem on CPU"); h_Cres = (int32_t*)malloc(sizeof(int32_t) * m * n); if (!h_Cres) printf("falied to allocate 
mem on CPU"); checkCudaAPIErrors(cudaMalloc((void **)&d_A, sizeof(int8_t ) * m * k)); checkCudaAPIErrors(cudaMalloc((void **)&d_B, sizeof(int8_t ) * k * n)); checkCudaAPIErrors(cudaMalloc((void **)&d_BT,sizeof(int8_t ) * n * k)); checkCudaAPIErrors(cudaMalloc((void **)&d_C, sizeof(int32_t) * m * n)); cudaSetDevice(devID); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, devID); //printf("Device : %s, compute SM %d.%d.\n",devProp.name, devProp.major, devProp.minor); cublasLtHandle_t ltHandle; checkcuBlasError(cublasLtCreate(&ltHandle)); cublasHandle_t handle; checkcuBlasError(cublasCreate(&handle)); float time_matmul= 0.0; // ms cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // step 1: initialize A and B //printf("step 1: initialize A and B with m=%d, n=%d and k=%d\n", m, n, k); matInit(m, k, h_A, m); //matDisplay(m, k, h_A, m); matInit(k, n, h_B, k); transpose(h_BT, h_B, k, n); //matDisplay(k, n, h_B, k); // step 2: compute matmul on cpu //printf("step 2: do gemm on CPU\n"); //matMul(m, n, k, h_A, m, h_B, k, h_C, m); //step 3: copy date from host to device //printf("step 3: copy date from host to device\n"); checkCudaAPIErrors(cudaMemcpy(d_A, h_A, sizeof(int8_t) * m * k,cudaMemcpyHostToDevice)); checkCudaAPIErrors(cudaMemcpy(d_B, h_B, sizeof(int8_t) * k * n,cudaMemcpyHostToDevice)); checkCudaAPIErrors(cudaMemcpy(d_BT,h_BT,sizeof(int8_t) * n * k,cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); /* // step 4-1: cublasGemmEx printf("step 4-1: call API cublasGemmEx\n"); cudaEventRecord(start, 0); cublasStatus_t cublasStat; for (int t = 0; t < iters; t++) { cublasStat=cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, d_A, CUDA_R_8I, m, d_B, CUDA_R_8I, k, &beta, d_C, CUDA_R_32I, m, CUDA_R_32I, static_cast<cublasGemmAlgo_t>(CUBLAS_GEMM_DEFAULT)); if(cublasStat != CUBLAS_STATUS_SUCCESS) { checkcuBlasError(cublasStat); continue; } } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_used, 
start, stop); time_used /= iters; checkCudaAPIErrors(cudaMemcpy(h_Cres, d_C, sizeof(int32_t) * m * n, cudaMemcpyDeviceToHost)); postprocess(h_C, h_Cres, m, n, k, time_used); */ // step 4-2: cublasLtMatMul //printf("step 4: call API cublasLtMatMul\n"); ltIgemmTensor(ltHandle, m, n, k, d_A, m, d_BT,n, d_C, m, iters, time_matmul); checkCudaAPIErrors(cudaMemcpy(h_Cres, d_C, sizeof(int32_t) * m * n, cudaMemcpyDeviceToHost)); // comment out the results check. It is verified before I upload //postprocess(h_C, h_Cres, m, n, k, time_matmul); printTime(time_matmul, m, n, k, s_max_tflops, s_max_m_n, s_max_k); //free memory free(h_A); free(h_B); free(h_BT); free(h_C); free(h_Cres); checkCudaAPIErrors(cudaFree(d_A)); checkCudaAPIErrors(cudaFree(d_B)); checkCudaAPIErrors(cudaFree(d_BT)); checkCudaAPIErrors(cudaFree(d_C)); checkCudaAPIErrors(cudaEventDestroy(start)); checkCudaAPIErrors(cudaEventDestroy(stop)); checkcuBlasError(cublasDestroy(handle)); checkcuBlasError(cublasLtDestroy(ltHandle)); } int main(int argc, char** argv) { int m,n,k; bool perf = true; if (argc < 2) { return EXIT_FAILURE; } if (argc == 2) { std::string tmp = argv[1]; if (tmp == "performance") perf= true; else if (tmp == "pressure") perf = false; else { std::cout << "Invalid parameters!"<<std::endl; return EXIT_FAILURE; } } float s_max_tflops = 0; int s_max_m_n = 0; int s_max_k = 0; int numRepeats; // for perf test if (perf == true) { cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl; cout << setw(7) << "m" << setw(7) << "n" << setw(7) << "k"; cout << setw(20) << "Time (msec)" << setw(15) << "TOPS"; cout << endl; s_max_tflops = 0; numRepeats = 10; for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) { for(k=1024; k <= 20480; k+=4096) { calINT8Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats); }} cout << "[Peak TOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< endl; checkCudaAPIErrors(cudaDeviceReset()); } if (perf == false) { cout << 
"[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl; cout << setw(7) << "m" << setw(7) << "n" << setw(7) << "k"; cout << setw(20) << "Time (msec)" << setw(15) << "TOPS"; cout << endl; s_max_tflops = 0; numRepeats = 2000; std::vector<int> mnk={512, 1024, 5120, 10240}; for(int i=0; i<mnk.size(); i++) calINT8Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats); checkCudaAPIErrors(cudaDeviceReset()); } }
the_stack
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>

namespace sd {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
// Computes the inverse of the permutation stored in x: for every position i,
// writes z[x[i]] = i.  Grid-stride loop over the input length; assumes x
// holds a valid permutation of 0..len-1 (out-of-range values would index z
// out of bounds) -- TODO confirm callers guarantee this.
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo,
                                                   void* vz, const Nd4jLong* zShapeInfo) {

    const T* x = reinterpret_cast<const T*>(vx);
          T* z = reinterpret_cast<T*>(vz);

    __shared__ Nd4jLong len, totalThreads;

    // thread 0 computes the loop bounds once, then broadcasts via shared mem
    if (threadIdx.x == 0) {
        len = shape::length(xShapeInfo);
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (Nd4jLong i = tid; i < len; i += totalThreads) {
        const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
        const Nd4jLong index = x[xOffset];
        const auto zOffset = shape::getIndexOffset(index, zShapeInfo);
        z[zOffset] = i;
    }
}

///////////////////////////////////////////////////////////////////
// Host-side launcher for invertPermutationCuda (1024 bytes of dynamic
// shared memory are requested although the kernel declares its own static
// shared variables -- presumably a fixed house convention; harmless).
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock,
                                                   const cudaStream_t *stream,
                                                   const void* vx, const Nd4jLong* xShapeInfo,
                                                         void* vz, const Nd4jLong* zShapeInfo) {

    invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}

////////////////////////////////////////////////////////////////////////
// Public entry point: inverts the permutation in 'input' into 'output',
// dispatching on the input's data type and synchronizing before return.
void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) {

    const int threadsPerBlock = MAX_NUM_THREADS;
    const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // ceil-div

    PointersManager manager(context, "invertPermutation");

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid,
threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T* sharedMem; __shared__ int xRank, zRank, *coordsMem; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<T*>(shmem); coordsMem = reinterpret_cast<int*>(shmem + blockDim.x * sizeof(T)); xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); auto coords = coordsMem + threadIdx.x * xRank; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(m, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(xShapeInfo, coords); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; __syncthreads(); } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, 
const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen); } /////////////////////////////////////////////////////////////////// void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * (sizeof(int) * input.rankOf() + input.sizeOfT()) + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets, *sharedMem; // xRank = zRank __shared__ Nd4jLong len, totalThreads; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<int*>(shmem); areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto tid 
= blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag); } /////////////////////////////////////////////////////////////////// void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int 
// NOTE(review): this kernel body begins above the visible chunk; the tokens below are the
// tail of a __shared__ int declaration whose first part is outside this view.
           xRank, zRank, *sharedMem;    // xRank >= zRank
    __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads;  // xLen >= zLen

    // Thread 0 publishes per-block constants (ranks, lengths, thread count) via shared memory.
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<int*>(shmem);

        xRank = shape::rank(zShapeInfo);
        zLen  = shape::length(zShapeInfo);
        numOfXOffsets = shape::length(xShapeInfo) / zLen;
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    auto memBuff  = sharedMem + threadIdx.x * 2 * xRank;   // per-thread scratch inside the dynamic shared buffer
    auto xOffsets = globMem + tid * numOfXOffsets;         // per-thread offset list in pre-allocated global memory

    // Grid-stride style loop over output elements: each z element accumulates the sum of
    // all x elements that the tile op broadcast onto it (tile backprop accumulation).
    for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

        const auto zOffset = shape::getIndexOffset(i, zShapeInfo);
        shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);

        z[zOffset] = x[xOffsets[0]];                    // first offset
        for (Nd4jLong j = 1; j < numOfXOffsets; ++j)    // rest offsets
            z[zOffset] += x[xOffsets[j]];
    }
}

///////////////////////////////////////////////////////////////////
// Host-side launcher: instantiates and launches tileBPCuda<T> on the given stream.
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {

    tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem);
}

//////////////////////////////////////////////////////////////////////////
// Backward pass of tile: accumulates gradO into gradI via the kernel above.
void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {

    NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context);   // empty auxiliary array for storing device memory which will be used in kernel calculations

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    // shared mem: 2*rank ints of scratch per thread, plus 128 bytes of slack
    const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128;

    PointersManager manager(context, "tileBP");

    NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});

    manager.synchronize();
}

// In-place Fisher-Yates-style element swaps over a vector; each thread handles a
// strided subset of candidate positions i, swapping element i with a random r < i.
template <typename T>
static __global__ void swapShuffleKernel(T* input, Nd4jLong const* shape, Nd4jLong firstDim, sd::graph::RandomGenerator* rng) {
    auto tid = blockIdx.x * blockDim.x;
    auto step = blockDim.x * gridDim.x;

    for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
        int r = rng->relativeInt(i) % i;
        if (i != r) {
            const auto iOffset = shape::getIndexOffset(i, shape);
            const auto rOffset = shape::getIndexOffset(r, shape);
            T e0 = input[iOffset];
            T e1 = input[rOffset];
            //math::nd4j_swap<T>(input(i), input(r));
            input[iOffset] = e1;
            input[rOffset] = e0;
        }
    }
}

// Out-of-place shuffle: writes input elements (selected through the indices permutation)
// into output, updating the shared indices array via atomicExch as it goes.
template <typename T>
static __global__ void fillShuffleKernel(T* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape, Nd4jLong firstDim, int* indices, sd::graph::RandomGenerator* rng) {
    // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
    auto tid = blockIdx.x * blockDim.x;
    auto step = blockDim.x * gridDim.x;

    for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) {
        int r = rng->relativeInt(i) % i;
        output[shape::getIndexOffset(i, outputShape)] = input[shape::getIndexOffset(indices[r], inputShape)];
        if(i != r) {
            output[shape::getIndexOffset(r, outputShape)] = input[shape::getIndexOffset(indices[i], inputShape)];
            // output.p(r, input.e<T>(indices[i]));
            // math::nd4j_swap<int>(indices[i], indices[r]);
            atomicExch(&indices[i], indices[r]);
        }
    }
}

//////////////////////////////////////////////////////////////////////////
// Shuffles input along its first dimension, either in place or into output.
// Three paths: trivial (length 1), flat vector (GPU kernels above), and
// general N-d (host-side swap/assign of sub-arrays along dims 1..rank-1).
template <typename T>
void randomShuffle_(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {

    // check edge cases first
    int temp;
    const int firstDim = input.sizeAt(0);
    auto stream = context->getCudaStream();

    NDArray::prepareSpecialUse({&output}, {&input});

    if(input.lengthOf() == 1 || firstDim == 1) {
        if(!isInplace)
            output.assign(input);
    }
    else if (input.isVector() || shape::isLikeVector(input.shapeInfo(), temp)) {

        // apply Fisher-Yates shuffle
        // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here.
        sd::graph::RandomGenerator* dRandom = nullptr;
        cudaMalloc(&dRandom, sizeof(sd::graph::RandomGenerator));
        cudaMemcpy(dRandom, &rng, sizeof(sd::graph::RandomGenerator), cudaMemcpyHostToDevice);

        T* inputBuf = reinterpret_cast<T*>(input.specialBuffer());
        if(isInplace) {
            swapShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), firstDim, dRandom);
        }
        else {
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);
            // copy only the first element; the kernel below fills positions 1..firstDim-1
            cudaMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), cudaMemcpyDeviceToDevice);
            //output.p<T>(Nd4jLong(0), input.e<T>(0));
            PointersManager pointersManager(context, "helper::randomShuffle_");
            int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int)));
            T* outputBuf = reinterpret_cast<T*>(output.specialBuffer());
            fillShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, indicesDev, dRandom);
            pointersManager.synchronize();
        }
        // rng.rewindH(firstDim - 1);
        cudaFree(dRandom);
    }
    else {

        // evaluate sub-arrays list of input array through all dimensions excluding first one
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
        auto subArrsListIn = input.allTensorsAlongDimension(dimensions);

        // apply Fisher-Yates shuffle
        if(isInplace) {
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                if(i != r)
                    subArrsListIn.at(i)->swapUnsafe(*subArrsListIn.at(r));
            }
        }
        else {
            // evaluate sub-arrays list of output array through all dimensions excluding first one
            auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);
            bool isZeroShuffled = false;
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                subArrsListOut.at(i)->assign(subArrsListIn.at(indices[r]));
                if(r == 0)
                    isZeroShuffled = true;
                if(i != r) {
                    subArrsListOut.at(r)->assign(subArrsListIn.at(indices[i]));
                    math::nd4j_swap<int>(indices[i], indices[r]);
                }
            }
            if(!isZeroShuffled)
                subArrsListOut.at(0)->assign(subArrsListIn.at(0));
        }
        rng.rewindH(firstDim-1);
    }

    NDArray::registerSpecialUse({&output}, {&input});
}

// Type-dispatched entry point for randomShuffle_.
void randomShuffle(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);

//////////////////////////////////////////////////////////////////////////
// Fills output with the identity matrix pattern.
void eye(sd::LaunchContext * context, NDArray& output) {

    output.setIdentity();
}

}
}
}
the_stack
#include "cuda_error.h"
#include "cuda_runtime.h"
#include "StreamingKernels.h"
#include "SingleFitStream.h"
#include "JobWrapper.h"
#include "GpuMultiFlowFitControl.h"
#include "SignalProcessingFitterQueue.h"
#include "LayoutTranslator.h"

// Debug-dump switches; all off for production builds.
#define REGIONAL_DUMP 0
//#define MIN_MEMORY_FOR_ONE_STREAM (450*1024*1024)
#define FGBUFFER_DUMP 0
#define EMPTYTRACE_DUMP 0
#define RESULT_DUMP 0
#define PROJECTION_ONLY 0
#define COLLECT_SAMPLES_FROM_FIRST20 0

using namespace std;

////////////////////////////////////////////////////////////////////////////////////////////////////////
//// Simple

// Static tuning knobs; -1 means "not set, fall back to the GPU-specific default".
int SimpleSingleFitStream::_bpb = -1;
int SimpleSingleFitStream::_l1type = -1;
int SimpleSingleFitStream::_fittype = -1;   //0 GaussNewton, 1 LevMar, 2 Hybrid, 3 Relaxing Kmult Gauss Newton
int SimpleSingleFitStream::_hybriditer = -1;   // LevMar after N iter of Gauss newton

// Default L1/shared-memory cache preference per compute capability.
int SimpleSingleFitStream::l1DefaultSetting()
{
  // 0: Equal, 1: Shared preferred, 2: L1 preferred
  if(_computeVersion == 20 ) return 2;
  if(_computeVersion >= 35 ) return 0;
  return 0;
}

// Default threads-per-block per compute capability.
int SimpleSingleFitStream::BlockSizeDefaultSetting()
{
  // With recent rearrangements (10-31-13), the magic number for C2075 seems to be 160...
  // And really, I think I've tested it properly...
  if(_computeVersion == 20 ) return 160;
  if(_computeVersion >= 35 ) return 256;
  return 128;
}

/////////////////////////////////////////////////
//FIT STREAM CLASS

// Binds the stream execution unit to its resources and derives the job's
// flow key / flow-block size from the queued work item.
SimpleSingleFitStream::SimpleSingleFitStream(streamResources * res, WorkerInfoQueueItem item ) :
  cudaSimpleStreamExecutionUnit(res, item),
  _myJob( static_cast< BkgModelWorkInfo * >( item.private_data )->flow_key,
          static_cast< BkgModelWorkInfo * >( item.private_data )->inception_state->
            bkg_control.signal_chunks.flow_block_sequence.BlockAtFlow(
              static_cast< BkgModelWorkInfo * >( item.private_data )->flow )->size() )
{
  setName("SingleFitStream");

  if(_verbose) cout << getLogHeader() << " created " << endl;

  _N = 0;
  _F = 0;
  _padN = 0;
}

SimpleSingleFitStream::~SimpleSingleFitStream()
{
  cleanUp();
}

void SimpleSingleFitStream::cleanUp()
{
  if(_verbose) cout << getLogHeader() << " clean up" << endl;
}

// Sizes and carves all host/device buffers for the current job out of the
// pre-allocated stream resources. Continues across the following lines.
void SimpleSingleFitStream::resetPointers()
{
  _N = _myJob.getNumBeads();
  _F = _myJob.getNumFrames();

  // NOTE(review): the message reads oddly — it is printed when checkDeviceMemory()
  // returns false; presumably that call reallocates on failure. Confirm semantics.
  if(!_resource->checkDeviceMemory(getMaxDeviceMem(_myJob.getFlowKey(),_myJob.getFlowBlockSize(), _F,_N )))
    cout << getLogHeader() << " Successfully reallocated device memory to handle Job" << endl;

  _padN = _myJob.getPaddedN();

  if(_verbose) cout << getLogHeader() << " resetting pointers for job with " << _N << "("<< _padN <<") beads and " << _F << " frames" << endl;

  try{
    //HOST DEVICE buffer pairs, Input and Output groups
    _hdFgBuffer = _resource->GetHostDevPair(_myJob.getFgBufferSizeShort(true));
    //fg buffers are copied first to overlap async copy with gathering of other input data
    _resource->StartNewSegGroup();

    _hdBeadParams = _resource->GetHostDevPair(_myJob.getBeadParamsSize(true));
    _hdBeadState = _resource->GetHostDevPair( _myJob.getBeadStateSize(true));
    //bead Params and State are our outputs.
    // therefore: close off the output pair group here.
    _hdCopyOutGroup = _resource->GetCurrentPairGroup();
    //do not start new group since outputs are also parts of input group
    _hdDarkMatter = _resource->GetHostDevPair(_myJob.getDarkMatterSize(true));  // NUMNUC*F
    _hdShiftedBkg = _resource->GetHostDevPair(_myJob.getShiftedBackgroundSize(true));  // flow_block_size*F
    _hdFineEmphVector = _resource->GetHostDevPair(_myJob.getEmphVecSize(true));  // (MAX_POISSON_TABLE_COL)*F
    _hdCrudeEmphVector = _resource->GetHostDevPair(_myJob.getEmphVecSize(true));  // (MAX_POISSON_TABLE_COL)*F
    _hdCoarseNucRise = _resource->GetHostDevPair(_myJob.getCoarseNucRiseSize(true));  // ISIG_SUB_STEPS_MULTI_FLOW * F * flow_block_size
    _hdFineNucRise = _resource->GetHostDevPair(_myJob.getFineNucRiseSize(true));  // ISIG_SUB_STEPS_SINGLE_FLOW * F * flow_block_size

    //all inputs are grouped now
    _hdCopyInGroup = _resource->GetCurrentPairGroup();

    //Device Only Memory Segments
    _dFgBufferFloat = _resource->getDevSegment(_myJob.getFgBufferSize(true));
    _dWorkBase = _resource->getDevSegment(getScratchSpaceAllocSize(_myJob) );
    _dBeadParamTransp = _resource->getDevSegment(_myJob.getBeadParamsSize(true));

    //std::cout << "Memory used: " << _resource->getDevMem()->memoryUsed() << std::endl;

    //additional host pointers for Constant memory init
    _hConstP = _resource->getHostSegment(sizeof(ConstParams));

    if(_myJob.performCrossTalkCorrection()){
      _hConstXtalkP = _resource->getHostSegment(sizeof(ConstXtalkParams));
      _hNeiIdxMap = _resource->getHostSegment(_myJob.getXtalkNeiIdxMapSize(true));
      _hSampleNeiIdxMap = _resource->getHostSegment(_myJob.getXtalkSampleNeiIdxMapSize(true));
    }

    //Reuse buffers on the device for other stuff ot create pointers to repacked data
    // We'll use this BeadParams as a reference to check against.
    // If someone tries to rearrange the data structures in BeadParams, we should complain.
    // Someday, we ought to access these chunks of data dynamically, and not be dependent on
    // BeadParams internals.
    // Checking for positive differences (>0) ensures that the fields are in the right order,
    // whatever size they happen to be.
    BeadParams dummy;

    //dev pointer after transpose (Structure of Arrays)
    // Each field of the transposed (SoA) bead-param block is padN floats wide;
    // the asserts tie the carving order to the field order inside BeadParams.
    size_t padNB = _padN*sizeof(float);
    _dCopies = _dBeadParamTransp;  //N
    assert( & dummy.R - & dummy.Copies == 1 );
    _dR = _dCopies.splitAt(padNB);  // N
    assert( & dummy.dmult - & dummy.R == 1 );
    _dDmult = _dR.splitAt(padNB);  // N
    assert( & dummy.gain - & dummy.dmult == 1 );
    _dGain = _dDmult.splitAt(padNB);  // N
    assert( dummy.Ampl - & dummy.gain == 1 );
    _dAmpl = _dGain.splitAt(padNB);  // N * flow_block_size
    assert( dummy.kmult - dummy.Ampl > 0 );
    _dKmult = _dAmpl.splitAt(padNB*(dummy.kmult - dummy.Ampl));  // N * flow_block_size
    assert( dummy.pca_vals - dummy.kmult > 0 );
    _dPCA_Vals = _dKmult.splitAt(padNB*(dummy.pca_vals - dummy.kmult));  // N*NUM_DM_PCA
    assert( & dummy.tau_adj - dummy.pca_vals == NUM_DM_PCA );
    _dTau_Adj = _dPCA_Vals.splitAt(padNB*NUM_DM_PCA);  // N
    assert( & dummy.phi - & dummy.tau_adj == 1 );
    _dPhi = _dTau_Adj.splitAt(padNB);  // N
    _dPhi.checkSize(padNB);  // N

    //device scratch space pointers
    _davg_trc = _dWorkBase;  // NxF
    _derr = _davg_trc.splitAt(padNB*_F);  // NxF
    _dfval = _derr.splitAt(padNB*_F);  // NxF
    _dtmp_fval = _dfval.splitAt(padNB*_F);  // NxF
    _djac = _dtmp_fval.splitAt(padNB*_F);  // 3*NxF Can be reduced in Taubadjust kernel
    _dMeanErr = _djac.splitAt(3*padNB *_F);  // N * flow_block_size
    _dMeanErr.checkSize(padNB*_myJob.getFlowBlockSize());

    // xtalk scratch space pointers
    // NOTE(review): these alias _dWorkBase, overlapping the scratch segments above.
    if(_myJob.performCrossTalkCorrection()){
      _dNeiContribution = _dWorkBase;
      _dXtalk = _dNeiContribution.splitAt(padNB *_myJob.getNumXtalkNeighbours()*_F);
      _dXtalkScratch = _dXtalk.splitAt(padNB*_F);
      _dNeiIdxMap = _dXtalkScratch.splitAt(padNB * 3*_F);
      _dSampleNeiIdxMap = _dNeiIdxMap.splitAt(_myJob.getXtalkNeiIdxMapSize(true));
      _dGenericXtalk = _dSampleNeiIdxMap.splitAt(_myJob.getXtalkSampleNeiIdxMapSize(true));
      _dGenericXtalk.checkSize(_myJob.getPaddedGenericXtalkSample()*_F);
    }
  }
  catch(cudaException &e)
  {
    e.Print();
    cout << getLogHeader() << "Encountered Error during Resource Acquisition!" << endl;
    throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
  }

  if(_verbose)cout << getLogHeader() << " " << _resource->Status() << endl;
}

// Gathers all per-job inputs into the page-locked host segments so they can be
// shipped to the device with async copies later (copyToDevice()).
void SimpleSingleFitStream::serializeInputs()
{
  if(_verbose) cout << getLogHeader() <<" serialize data for async global mem copy" << endl;

  try{
    _hdFgBuffer.copyIn(_myJob.getFgBuffer() ,_myJob.getFgBufferSizeShort());
    _hdBeadParams.copyIn(_myJob.getBeadParams(),_myJob.getBeadParamsSize());
    _hdBeadState.copyIn(_myJob.getBeadState(),_myJob.getBeadStateSize());
    _hdDarkMatter.copyIn(_myJob.getDarkMatter(), _myJob.getDarkMatterSize());
    _hdShiftedBkg.copyIn(_myJob.getShiftedBackground(), _myJob.getShiftedBackgroundSize());
    _hdCoarseNucRise.copyIn(_myJob.getCoarseNucRise(), _myJob.getCoarseNucRiseSize());
    _hdFineNucRise.copyIn(_myJob.getFineNucRise(), _myJob.getFineNucRiseSize());

    // a little hacky but we want to fill the structure in page locked memory with data
    ConstParams* tmpConstP = _hConstP.getPtr();
    //init the reg_param part (all we need from the reg params is non-dynamic)
    reg_params* tmpConstPCastToReg = (reg_params*)tmpConstP;
    *(tmpConstPCastToReg) = *(_myJob.getRegionParams()); // use the
    // init the rest of the ConstParam buffers
#if 0
    cout << "flow," << _myJob.getAbsoluteFlowNum() << ",ratiodrift," << tmpConstPCastToReg->RatioDrift << endl;
#endif
    memcpy( tmpConstP->coarse_nuc_start, _myJob.getCoarseNucStart(), _myJob.getStartNucSize() );
    memcpy( tmpConstP->fine_nuc_start, _myJob.getFineNucStart(), _myJob.getStartNucSize() );
    memcpy( tmpConstP->deltaFrames, _myJob.getDeltaFrames(), _myJob.getDeltaFramesSize() );
    memcpy( tmpConstP->frameNumber, _myJob.getFrameNumber(), _myJob.getFrameNumberSize() );
    memcpy( tmpConstP->flowIdxMap, _myJob.getFlowIdxMap(), _myJob.getFlowIdxMapSize());

    // crude then fine emphasis vectors share the same job-side buffer, so the
    // copyIn has to happen right after each setUp call.
    _myJob.setUpCrudeEmphasisVectors();
    memcpy(tmpConstP->non_zero_crude_emphasis_frames, _myJob.GetNonZeroEmphasisFrames(), _myJob.GetNonZeroEmphasisFramesVecSize());
    _hdCrudeEmphVector.copyIn(_myJob.getEmphVec(), _myJob.getEmphVecSize());
    _myJob.setUpFineEmphasisVectors();
    memcpy(tmpConstP->non_zero_fine_emphasis_frames, _myJob.GetNonZeroEmphasisFrames(), _myJob.GetNonZeroEmphasisFramesVecSize());
    _hdFineEmphVector.copyIn(_myJob.getEmphVec(), _myJob.getEmphVecSize());

    tmpConstP->useDarkMatterPCA = _myJob.useDarkMatterPCA();

    size_t rC = _myJob.getRegCol();
    size_t rR = _myJob.getRegRow();
    /* ImgRegParams irP; irP.init(_myJob.getImgWidth(),_myJob.getImgHeight(), 216,224,_myJob.getNumFrames()); size_t regId = irP.getRegId(rC,rR); if(irP.isInRegion(regId, 321928)) tmpConstP->dumpRegion = true; else tmpConstP->dumRegion = false; */

    if(_myJob.performCrossTalkCorrection()) {
      // copy neighbor map for xtalk
      ConstXtalkParams *tmpConstXtalkP = _hConstXtalkP.getPtr();
      tmpConstXtalkP->simpleXtalk = _myJob.IsSimpleTraceLevelXtalk();
      tmpConstXtalkP->neis = _myJob.getNumXtalkNeighbours();
      memcpy( tmpConstXtalkP->multiplier, _myJob.getXtalkNeiMultiplier(),sizeof(float)*_myJob.getNumXtalkNeighbours());
      memcpy( tmpConstXtalkP->tau_top, _myJob.getXtalkNeiTauTop(),sizeof(float)*_myJob.getNumXtalkNeighbours());
      memcpy( tmpConstXtalkP->tau_fluid, _myJob.getXtalkNeiTauFluid(),sizeof(float)*_myJob.getNumXtalkNeighbours());

      _hNeiIdxMap.copyIn(const_cast<int*>(_myJob.getNeiIdxMapForXtalk()), sizeof(int)*_myJob.getNumBeads()*_myJob.getNumXtalkNeighbours());
      _hSampleNeiIdxMap.copyIn(const_cast<int*>(_myJob.getSampleNeiIdxMapForXtalk()), sizeof(int)*(GENERIC_SIMPLE_XTALK_SAMPLE)*_myJob.getNumXtalkNeighbours());
    }

    // Optional per-flow-block debug dumps (all compiled out by default).
    if( (_myJob.getAbsoluteFlowNum()%_myJob.getFlowBlockSize()) == 0 ){
      ImgRegParams irP(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getMaxRegionWidth(),_myJob.getMaxRegionHeight());
      if(_myJob.getAbsoluteFlowNum() >= 20){
        //static RegParamDumper regDump(_myJob.getImgWidth(),_myJob.getImgHeight(),_myJob.getRegionWidth(), _myJob.getRegionHeight());
        //regDump.DumpAtFlow(rC, rR,*tmpConstP,_myJob.getAbsoluteFlowNum());
#if REGIONAL_DUMP
        static CubePerFlowDump<reg_params> RegionDump( irP.getGridDimX(), irP.getGridDimY(),1,1,1,1);
        RegionDump.setFilePathPrefix("RegionParams");
        RegionDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getRegionParams(),_myJob.getAbsoluteFlowNum(),1);
#endif
#if EMPTYTRACE_DUMP
        static CubePerFlowDump<float> emptyTraceDump( irP.getGridDimX()*_myJob.getUncompressedFrames(), irP.getGridDimY(), _myJob.getUncompressedFrames(), 1,1, _myJob.getFlowBlockSize());
        //empty trace average;
        emptyTraceDump.setFilePathPrefix("EmptyTraces");
        emptyTraceDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getShiftedBackground(),_myJob.getAbsoluteFlowNum(),_myJob.getNumFrames());
#endif
        /* static CubePerFlowDump<float> NucRiseDump( irP.getGridDimX()*_myJob., irP.getGridDimY(), _myJob.getUncompressedFrames(), 1,1, _myJob.getFlowBlockSize()); //empty trace average; NucRiseDump.setFilePathPrefix("NucRise"); NucRiseDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getShiftedBackground(),_myJob.getAbsoluteFlowNum(),_myJob.getNumFrames()); */
        /* static CubePerFlowDump<float> emphasisDump( irP.getGridDimX()*_myJob.getMaxFrames()*MAX_POISSON_TABLE_COL, irP.getGridDimY(), _myJob.getMaxFrames()*MAX_POISSON_TABLE_COL, 1,1,1); emphasisDump.setFilePathPrefix("EmphasisDump"); emphasisDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getEmphVec(), _myJob.getAbsoluteFlowNum(), _myJob.getEmphVecSize()/sizeof(float)); */
#if FGBUFFER_DUMP
        static CubePerFlowDump<short> FGDump(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getRegionWidth(),_myJob.getRegionHeight(),_myJob.getImageFrames(), _myJob.getFlowBlockSize());
        FGDump.setFilePathPrefix("FgBufferDump");
        size_t regId = irP.getRegId(rC,rR);
        LayoutCubeWithRegions<short> fgBufferCube(irP.getRegW(regId),irP.getRegH(regId),irP.getRegW(regId),irP.getRegH(regId),_myJob.getNumFrames());
        for(int f=0; f<_myJob.getFlowBlockSize(); f++){
          TranslateFgBuffer_RegionToCube(fgBufferCube, _myJob.getNumBeads() , _myJob.getNumFrames(), _myJob.getFlowBlockSize(),_myJob.getFgBuffer()+_myJob.getNumFrames()*f,_myJob.getBeadParams(), 0);
          FGDump.DumpOneFlowRegion(regId,fgBufferCube,0,_myJob.getAbsoluteFlowNum(),f,0,_myJob.getNumFrames());
        }
#endif
      }
    }
  }
  catch(cudaException &e)
  {
    cout << getLogHeader() << "Encountered Error during Input Serialization!" << endl;
    throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
  }
}

//////////////////////////
// IMPLEMENTATION OF THE VIRTUAL INTERFACE
// ASYNC CUDA FUNCTIONS, KERNEL EXECUTION AND DATA HANDLING

// Attaches the queued work item's payload to the job wrapper; returns validity.
bool SimpleSingleFitStream::InitJob() {
  _myJob.setData(static_cast<BkgModelWorkInfo *>(getJobData()));
  return _myJob.ValidJob();
}

// Full async pipeline for one job: prepare, H2D copy, kernels, D2H copy.
void SimpleSingleFitStream::ExecuteJob()
{
  prepareInputs();
  copyToDevice();
  executeKernel();
  copyToHost();
}

// Copies fitted bead params/state back into the job's buffers and re-queues
// the job for the CPU post-fit step. Continues on the following lines.
int SimpleSingleFitStream::handleResults()
{
  if(_verbose) cout << getLogHeader() << " Handling Results" <<endl;

  if(_myJob.isSet()){
    // for actual pipeline we have to copy the results back into original buffer
    try{
      _hdBeadParams.copyOut(_myJob.getBeadParams(), _myJob.getBeadParamsSize());
      _hdBeadState.copyOut(_myJob.getBeadState(),_myJob.getBeadStateSize());
#if RESULT_DUMP
      if( _myJob.getAbsoluteFlowNum() >= 20){
        ImgRegParams irP;
        irP.init(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getRegionWidth(),_myJob.getRegionHeight());
        size_t rC = _myJob.getRegCol();
        size_t rR = _myJob.getRegRow();
        size_t regId = irP.getRegId(rC,rR);
        static CubePerFlowDump<float> ResultDump(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getRegionWidth(),_myJob.getRegionHeight(), Result_NUM_PARAMS, _myJob.getFlowBlockSize());
        ResultDump.setFilePathPrefix("ResultDump");
        LayoutCubeWithRegions<float> ResultCube(irP.getRegW(regId),irP.getRegH(regId),irP.getRegW(regId),irP.getRegH(regId),Result_NUM_PARAMS);
        for(int f=0; f<_myJob.getFlowBlockSize(); f++){
          TranslateResults_RegionToCube(ResultCube, _myJob.getNumBeads() , f, _myJob.getBeadParams(), 0);
          ResultDump.DumpOneFlowRegion(regId,ResultCube,0,_myJob.getAbsoluteFlowNum(),f);
        }
      }
#endif
      _myJob.setJobToPostFitStep();
      _myJob.putJobToCPU(_item);
    }
    catch(cudaException &e)
    {
      cout << getLogHeader() << "Encountered Error during Result Handling!" << endl;
      throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
    }
  }
  return 0;
}

// Prints current launch settings, state and job summary to stdout.
void SimpleSingleFitStream::printStatus()
{
  cout << getLogHeader() << " status: " << endl
       << " +------------------------------" << endl
       << " | block size: " << getBeadsPerBlock() << endl
       << " | l1 setting: " << getL1Setting() << endl
       << " | state: " << _state << endl;
  if(_resource->isSet())
    cout << " | streamResource acquired successfully"<< endl;
  else
    cout << " | streamResource not acquired"<< endl;
  _myJob.printJobSummary();
  cout << " +------------------------------" << endl;
}

///////////////////////////////////////////////////////////////

// Runs the CPU pre-steps, then sizes buffers and serializes inputs.
void SimpleSingleFitStream::prepareInputs()
{
  //prepare environment for new job
  preProcessCpuSteps();
  resetPointers();
  serializeInputs();
}

// Queues all async host-to-device copies (const params, fg buffer, grouped inputs,
// and — when enabled — the cross-talk neighbour maps) on this unit's stream.
void SimpleSingleFitStream::copyToDevice()
{
  // move data to device
  if(_verbose) cout << getLogHeader() << " Async Copy To Device" << endl;

  try{
    StreamingKernels::copyFittingConstParamAsync(_hConstP.getPtr(), getStreamId() ,_stream);CUDA_ERROR_CHECK();
    _hdFgBuffer.copyToDeviceAsync(_stream, _myJob.getFgBufferSizeShort());
    _hdCopyInGroup.copyToDeviceAsync(_stream);

    // copy xtalk neighbor map
    if(_myJob.performCrossTalkCorrection()) {
      StreamingKernels::copyXtalkConstParamAsync(_hConstXtalkP.getPtr(), getStreamId() ,_stream);CUDA_ERROR_CHECK();
      _dNeiIdxMap.copyAsync(_hNeiIdxMap, _stream, sizeof(int)*_myJob.getNumBeads()*_myJob.getNumXtalkNeighbours());
      _dSampleNeiIdxMap.copyAsync(_hSampleNeiIdxMap, _stream, sizeof(int)*(GENERIC_SIMPLE_XTALK_SAMPLE)* _myJob.getNumXtalkNeighbours());
    }
  }
  catch(cudaException &e)
  {
    cout << getLogHeader() << "Encountered Error during Copy to device!" << endl;
    throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
  }
}

// Launches the full per-flow-block kernel sequence: transpose inputs to SoA,
// optional cross-talk correction or pre-fit processing, optional projection
// search / exponential tail fit, the selected single-flow fitter, and the
// transpose of results back to AoS.
void SimpleSingleFitStream::executeKernel()
{
  if(_verbose) cout << getLogHeader() << " Async Kernel Exec" << endl;

  // Transpose the raw fg buffer (short -> float) into flow/frame-major layout.
  dim3 block(32,32);
  dim3 grid( (_F*_myJob.getFlowBlockSize()+ block.x-1)/block.x , (_padN+block.y-1)/block.y);

  StreamingKernels::transposeDataToFloat (grid, block, 0 ,_stream,_dFgBufferFloat.getPtr(), _hdFgBuffer.getPtr(), _F*_myJob.getFlowBlockSize(), _padN);

  int StructLength = (sizeof(BeadParams)/sizeof(float));
  if((sizeof(BeadParams)%sizeof(float)) != 0 )
  {
    cerr << "Structure not a multiple of sizeof(float), transpose not possible" << endl;
    exit(-1);
  }

  // Transpose BeadParams (AoS) into the SoA layout carved in resetPointers().
  grid.x = (StructLength + block.x-1)/block.x ;
  grid.y = (_padN+block.y-1)/block.y;

  StreamingKernels::transposeData( grid, block, 0, _stream, (float*)_dBeadParamTransp.getPtr(), (float*)_hdBeadParams.getPtr(), StructLength, _padN);

  // Switch to 1-D launch config for the per-bead kernels.
  block.x = getBeadsPerBlock();
  block.y = 1;
  grid.y = 1;
  grid.x = (_N+block.x-1)/block.x;

  // NOTE(review): these segments are only carved when cross-talk correction is
  // enabled, yet they are cleared unconditionally here — confirm memSet on an
  // unassigned segment is a safe no-op.
  _dNeiContribution.memSet(0);
  _dXtalkScratch.memSet(0);
  _dXtalk.memSet(0);

  // cross talk correction is performed for 3-series chips only
  if (_myJob.performCrossTalkCorrection()) {
    for (int fnum=0; fnum<_myJob.getFlowBlockSize(); ++fnum) {
      StreamingKernels::NeighbourContributionToXtalk(
          grid, block, 0, _stream,
          _dR.getPtr(), // N
          _dCopies.getPtr(), // N
          _dPhi.getPtr(), // N
          (float*)_hdShiftedBkg.getPtr() + fnum*_F, // FLxF
          (float*)_dFgBufferFloat.getPtr() + fnum*_padN*_F, // FLxFxN
          _hdBeadState.getPtr(),
          _myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
          fnum,
          _N, // 4
          _F, // 4
          //xtalk arguments
          _dXtalkScratch.getPtr(),
          _dNeiContribution.getPtr(),
          getStreamId());

      /* cudaDeviceSynchronize(); cout << "flow: " << fnum << " done" << endl; int Ids[19] = {3905,3838,3907,3904,3979,3906,3981,3908,3978,4053,4055,3982,4052,4128,4054,4130,4056,4129}; //int Ids[19] = {3838,3839,3840,3905,3906,3907,3908,3909,3979,3980,3982,3983,4053,4054,4055,4056,4057,4130}; // int Ids[19] = {4442,4369,4295,4371,4368,4441,4370,-1,4372,4440,4511,-1,-1,4510,-1,4512,-1,-1,4578}; // int Ids[19] = {4441,4293,4294,4295,4367,4368,4369,4370,4371,4439,4440,4442,-1,4509,4510,4511,4512,-1,-1}; for(int i=0; i<19; i++) if(Ids[i] >= 0 ) cout << Ids[i]<< "," <<(_myJob.getBeadParams())[Ids[i]].x << "," <<(_myJob.getBeadParams())[Ids[i]].y << endl; else cout << "4442,-1,-1" << endl; */
      /* if(fnum==0){ printf("bead XTalk contribution:\n"); float * beadXTalkcontr = new float[_dNeiContribution.getNumElements()]; _dNeiContribution.copyOut(beadXTalkcontr); int num_beads = ((_N+32-1)/32) * 32; for(int n=0;n<_N ;n++){ float * thisBead = beadXTalkcontr + n; printf ("%d, %d, ", (_myJob.getBeadParams())[n].x, (_myJob.getBeadParams())[n].y ); float sumF = 0; for(int f=0; f<_F; f++){ printf ("%f, ", thisBead[f*num_beads]); //sumF += thisBead[f*num_beads]; } printf("\n"); } } */

      StreamingKernels::XtalkAccumulation(
          grid, block, 0, _stream,
          _hdBeadState.getPtr(),
          _N, // 4
          _F, // 4
          _dNeiIdxMap.getPtr(),
          _dNeiContribution.getPtr(),
          _dXtalk.getPtr(),
          getStreamId());
      // NOTE(review): full-device sync inside the per-flow loop — defeats the
      // async stream pipeline; presumably left in from debugging. Confirm intent.
      cudaDeviceSynchronize();

      /* cudaDeviceSynchronize(); cout << "flow: " << fnum << " done" << endl; if(fnum==19){ printf("bead XTalk:\n"); float * beadXTalk = new float[_dXtalk.getNumElements()]; _dXtalk.copyOut(beadXTalk); int num_beads = ((_N+32-1)/32) * 32; for(int n=0;n<_N ;n++){ float * thisBead = beadXTalk + n; printf ("%d, %d, ", (_myJob.getBeadParams())[n].x, (_myJob.getBeadParams())[n].y ); float sumF = 0; for(int f=0; f<_F; f++){ //printf ("%f, ", thisBead[f*num_beads]); sumF += thisBead[f*num_beads]; } printf("%f\n", sumF); } } */

      StreamingKernels::CalculateGenericXtalkForSimpleModel(
          1, // dumb version to get things going..ultra fast already
          GENERIC_SIMPLE_XTALK_SAMPLE, 0, _stream,
          _N,
          _F,
          //_myJob.getRegionWidth(),
          //_myJob.getRegionHeight(),
          _hdBeadState.getPtr(),
          _dSampleNeiIdxMap.getPtr(),
          _dNeiContribution.getPtr(),
          _dGenericXtalk.getPtr(),
          _dNeiContribution.getPtr(), // use for generic xtalk
          getStreamId());

      /* if(_myJob.getAbsoluteFlowNum() + fnum == 99){ float * genXtalk = new float[_dFgBufferFloat.getNumElements()]; _dNeiContribution.copyOut(genXtalk); ImgRegParams irP; irP.init(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getMaxRegionWidth(),_myJob.getMaxRegionHeight()); size_t rC = _myJob.getRegCol(); size_t rR = _myJob.getRegRow(); size_t regId = irP.getRegId(rC,rR); printf("%lu, ", regId); for(int f=0; f < _F; f++) { printf("%f, ", genXtalk[f]); } printf("\n"); } */

      StreamingKernels::ComputeXtalkAndZeromerCorrectedTrace(
          grid, block, 0, _stream,
          fnum,
          (float*)_dFgBufferFloat.getPtr() + fnum*_padN*_F, // FLxFxN
          _N, // 4
          _F, // 4
          _dNeiContribution.getPtr(),
          _dXtalk.getPtr(),
          _dCopies.getPtr(), // N
          _dR.getPtr(), // N
          _dPhi.getPtr(), // N
          _dGain.getPtr(), // N
          (float*)_hdShiftedBkg.getPtr() + fnum*_F,
          _hdDarkMatter.getPtr(), // FLxF
          _dPCA_Vals.getPtr(),
          _myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
          getStreamId());
    }
  }
  else {
    StreamingKernels::PreSingleFitProcessing(
        grid, block, 0 , _stream,
        // Here FL stands for flows
        // inputs from data reorganization
        _dCopies.getPtr(), // N
        _dR.getPtr(), // N
        _dPhi.getPtr(), // N
        _dGain.getPtr(), // N
        _dAmpl.getPtr(), // FLxN
        _hdShiftedBkg.getPtr(), // FLxF
        _hdDarkMatter.getPtr(), // FLxF
        _dPCA_Vals.getPtr(),
        _dFgBufferFloat.getPtr(), // FLxFxN
        // other inputs
        _myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
        _N, // 4
        _F, // 4
        false,
        getStreamId(),
        _myJob.getFlowBlockSize());
  }

  // perform projection search for amplitude estimation
  if ((_myJob.getAbsoluteFlowNum() > 19) && _myJob.InitializeAmplitude()) {
    StreamingKernels::ProjectionSearch(
        grid, block, 0, _stream,
        _hdBeadState.getPtr(),
        _dFgBufferFloat.getPtr(),
        _hdCrudeEmphVector.getPtr(),
        _hdCoarseNucRise.getPtr(),
        _dCopies.getPtr(),
        _dfval.getPtr(),
        _myJob.getAbsoluteFlowNum(),
        _N,
        _F,
        getStreamId(),
        _myJob.getFlowBlockSize());
  }
  //ampl update

  // perform exponential tail fitting
  if (_myJob.performExpTailFitting()) {
    // only done in first 20 flows
    // TODO: need to initialize taub adjustment to 1 if not fitting it
    if (_myJob.getAbsoluteFlowNum() == 0 && _myJob.performTauAdjInExpTailFit()) {
      StreamingKernels::TaubAdjustForExponentialTailFitting(
          grid, block, 0, _stream,
          _hdBeadState.getPtr(),
          _dFgBufferFloat.getPtr(), // FLxFxN,
          _dAmpl.getPtr(), // FLxN
          _dR.getPtr(), // N
          _dCopies.getPtr(), // N
          _dPhi.getPtr(), // N
          _davg_trc.getPtr(),
          _dfval.getPtr(),
          _dtmp_fval.getPtr(),
          _derr.getPtr(),
          _djac.getPtr(),
          _N,
          _F,
          _dTau_Adj.getPtr(), // output it is a per bead parameter
          getStreamId(),
          _myJob.getFlowBlockSize());
    }

    if (_myJob.performBkgAdjInExpTailFit()) {
      StreamingKernels::ExponentialTailFitting(
          grid, block, 0, _stream,
          _myJob.expTailFitBkgAdjLimit(),
          _myJob.expTailFitBkgDcLowerLimit(),
          _hdBeadState.getPtr(),
          _dTau_Adj.getPtr(),
          _dAmpl.getPtr(),
          _dR.getPtr(),
          _dCopies.getPtr(),
          _dPhi.getPtr(), // N
          _dFgBufferFloat.getPtr(),
          _hdShiftedBkg.getPtr(),
          _dtmp_fval.getPtr(),
          _N,
          _F,
          _myJob.getAbsoluteFlowNum(),
          getStreamId(),
          _myJob.getFlowBlockSize());
    }
  }

  int sharedMem = _myJob.getEmphVecSize();
#if PROJECTION_ONLY
  if(_myJob.getAbsoluteFlowNum() < 20 )
  {
#endif
    //std::cout << "====================> Numframes: " << numFrames << std::endl;

    // perform single flow fitting — fitter selected by the static _fittype knob.
    switch(_fittype){
      case 1:
        StreamingKernels::PerFlowLevMarFit(getL1Setting(), grid, block, sharedMem, _stream,
            // inputs
            _dFgBufferFloat.getPtr(),
            _hdFineEmphVector.getPtr(),
            _hdFineNucRise.getPtr(),
            // bead params
            _dCopies.getPtr(),
            _hdBeadState.getPtr(),
            // scratch space in global memory
            _derr.getPtr(), //
            _dfval.getPtr(), // NxF
            _dtmp_fval.getPtr(), // NxF
            _dMeanErr.getPtr(),
            // other inputs
            _myJob.getAmpLowLimit(),
            _myJob.getkmultHighLimit(),
            _myJob.getkmultLowLimit(),
            _myJob.getkmultAdj(),
            _myJob.fitkmultAlways(),
            _myJob.getAbsoluteFlowNum() , // real flow number
            _myJob.getNumBeads(), // 4
            _F,
            _myJob.useDynamicEmphasis(),
            getStreamId(), // stream id for offset in const memory
            _myJob.getFlowBlockSize() );
        break;
      case 2:
        StreamingKernels::PerFlowHybridFit(getL1Setting(), grid, block, sharedMem, _stream,
            // inputs
            _dFgBufferFloat.getPtr(),
            _hdFineEmphVector.getPtr(),
            _hdFineNucRise.getPtr(),
            // bead params
            _dCopies.getPtr(),
            _hdBeadState.getPtr(),
            // scratch space in global memory
            _derr.getPtr(), //
            _dfval.getPtr(), // NxF
            _dtmp_fval.getPtr(), // NxF
            _dMeanErr.getPtr(),
            // other inputs
            _myJob.getAmpLowLimit(),
            _myJob.getkmultHighLimit(),
            _myJob.getkmultLowLimit(),
            _myJob.getkmultAdj(),
            _myJob.fitkmultAlways(),
            _myJob.getAbsoluteFlowNum() , // real flow number
            _myJob.getNumBeads(), // 4
            _F,
            _myJob.useDynamicEmphasis(),
            getStreamId(), // stream id for offset in const memory
            3, // switchToLevMar ???
            _myJob.getFlowBlockSize() );
        break;
      case 3:
        StreamingKernels::PerFlowRelaxKmultGaussNewtonFit(getL1Setting(), grid, block, sharedMem, _stream,
            // inputs
            _dFgBufferFloat.getPtr(),
            _hdFineEmphVector.getPtr(),
            _hdFineNucRise.getPtr(),
            // bead params
            _dCopies.getPtr(),
            _hdBeadState.getPtr(),
            // scratch space in global memory
            _derr.getPtr(), //
            _dfval.getPtr(), // NxF
            _dtmp_fval.getPtr(), // NxF
            _djac.getPtr(), //NxF
            _dMeanErr.getPtr(),
            // other inputs
            _myJob.getAmpLowLimit(),
            _myJob.getkmultHighLimit(),
            _myJob.getkmultLowLimit(),
            _myJob.getkmultAdj(),
            _myJob.fitkmultAlways(),
            _myJob.getAbsoluteFlowNum() , // real flow number
            _myJob.getNumBeads(), // 4
            _F,
            _myJob.useDynamicEmphasis(),
            _myJob.useSlowKmultInit(),
            getStreamId(), // stream id for offset in const memory
            _myJob.getFlowBlockSize() );
        break;
      case 0:
      default:
        StreamingKernels::PerFlowGaussNewtonFit(getL1Setting(), grid, block, sharedMem, _stream,
            // inputs
            _dFgBufferFloat.getPtr(),
            _hdFineEmphVector.getPtr(),
            _hdFineNucRise.getPtr(),
            // bead params
            _dCopies.getPtr(),
            _hdBeadState.getPtr(),
            // scratch space in global memory
            _derr.getPtr(), //
            _dfval.getPtr(), // NxF
            _dtmp_fval.getPtr(), // NxF
            _dMeanErr.getPtr(),
            // other inputs
            _myJob.getAmpLowLimit(),
            _myJob.getkmultHighLimit(),
            _myJob.getkmultLowLimit(),
            _myJob.getkmultAdj(),
            _myJob.fitkmultAlways(),
            _myJob.getAbsoluteFlowNum() , // real flow number
            _myJob.getNumBeads(), // 4
            _F,
            _myJob.useDynamicEmphasis(),
            getStreamId(), // stream id for offset in const memory
            _myJob.getFlowBlockSize() );
    }
#if PROJECTION_ONLY
  }
#endif

  // Transpose fitted SoA bead params back into the AoS host/device pair.
  block.x = 32;
  block.y = 32;
  grid.x = (_padN+block.y-1)/block.y;
  grid.y = (StructLength + block.x-1)/block.x;

  StreamingKernels::transposeData( grid, block, 0, _stream, (float*)_hdBeadParams.getPtr(), (float*)_dBeadParamTransp.getPtr(), _padN, StructLength);
}

// Queues the async device-to-host copy of the output group (bead params + state).
void SimpleSingleFitStream::copyToHost()
{
  //cout << getId() << " Async copy back" <<endl;
  //cudaMemcpyAsync( _h_pBeadParams, _d_pBeadParams, _copyOutSize , cudaMemcpyDeviceToHost, _stream); CUDA_ERROR_CHECK();
  _hdCopyOutGroup.copyToHostAsync(_stream);

#if 0
  // To use this, you'll need to tweak JobWrapper.h to make BkgModelWorkInfo * _info public.
  cudaMemcpy( _h_pBeadParams, _d_pBeadParams, _copyOutSize , cudaMemcpyDeviceToHost); CUDA_ERROR_CHECK();

  ostringstream name;
  name << "dumpFile_" << getpid() << "_" << _myJob._info->bkgObj->region_data->region->index;
  ofstream out( name.str().c_str() );

  out << "N " << _N << "\n";
  out << "F " << _F << "\n";
  out << "padN " << _padN << "\n";
  out << "copyInSize " << _copyInSize << "\n";
  out << "copyOutSize " << _copyOutSize << "\n";
  out << "host state array: " << _h_pBeadState << "\n";
  out << "device state array: " << _d_pBeadState << "\n";

  // We've got N BeadParams...
  for( size_t i = 0 ; i < _N ; ++i )
  {
    BeadParams &bp = _h_pBeadParams[i];
    out << i << ":\n";
    out << " Copies " << bp.Copies << "\n";
    out << " R " << bp.R << "\n";
    out << " dmult " << bp.dmult << "\n";
    out << " gain " << bp.gain << "\n";
    out << " Ampl, kmult " << "\n";
    for( size_t j = 0 ; j < _myJob.getFlowBlockSize() ; ++j )
    {
      out << " " << j << ": " << bp.Ampl[j] << " " << bp.kmult[j] << "\n";
    }
    out << " pca_vals " << "\n";
    for( size_t j = 0 ; j < NUM_DM_PCA ; ++j )
    {
      out << " " << j << ": " << bp.pca_vals[j] << "\n";
    }
    out << " tau_adj " << bp.tau_adj << "\n";
    //out << " my_state (ptr) " << bp.my_state << "\n";
    out << " trace_ndx " << bp.trace_ndx << "\n";
    out << " x " << bp.x << "\n";
    out << " y " << bp.y << "\n";
  }

  for( size_t i = 0 ; i < _N ; ++i )
  {
    bead_state & bs = _h_pBeadState[i];
    out << "state " << i << ": " << "\n";
    out << " bad_read " << bs.bad_read << "\n";
    out << " clonal_read " << bs.clonal_read << "\n";
    out << " corrupt " << bs.corrupt << "\n";
    out << " pinned " << bs.pinned << "\n";
    out << " random_samp " << bs.random_samp << "\n";
    out << " key_norm " << bs.key_norm << "\n";
    out << " ppf " << bs.ppf << "\n";
    out << " ssq " << bs.ssq << "\n";
    out << " avg_err " << bs.avg_err << "\n";
  }
#endif
}

// CPU-side work that must precede serialization: nuc-rise tables for this job.
void SimpleSingleFitStream::preProcessCpuSteps() {
  _myJob.calculateCoarseNucRise();
  _myJob.calculateFineNucRise();
}

// Threads per block; falls back to the compute-capability default when unset.
int SimpleSingleFitStream::getBeadsPerBlock()
{
  if(_bpb < 0){
    return BlockSizeDefaultSetting();
  }
  return _bpb;
}

// Cache-preference setting; falls back to the default when unset/out of range.
int SimpleSingleFitStream::getL1Setting()
{
  if(_l1type < 0 || _l1type > 2){
    return l1DefaultSetting();
  }
  return _l1type;
}

/////////////////////////////////////////////////////////////////////////
//static Function

// Registers this stream type's worst-case device/host memory needs with the
// resource pool (max over key-0 and key-specific jobs).
void SimpleSingleFitStream::requestResources(
    int flow_key,
    int flow_block_size,
    float deviceFraction)
{
  size_t devAlloc = static_cast<size_t>( deviceFraction * max( getMaxDeviceMem( flow_key, flow_block_size, 0, 0 ), getMaxDeviceMem( 0, flow_block_size, 0, 0 ) ) );
  size_t hostAlloc = max( getMaxHostMem(flow_key, flow_block_size), getMaxHostMem(0, flow_block_size) );
  cout << "CUDA: SingleFitStream active and resources requested dev = "<< devAlloc/(1024.0*1024) << "MB ("<< (int)(deviceFraction*100)<<"%) host = " << hostAlloc/(1024.0*1024) << "MB" <<endl;
  cudaResourcePool::requestDeviceMemory(devAlloc);
  cudaResourcePool::requestHostMemory(hostAlloc);
}

// Worst-case page-locked host memory for one job with these parameters.
size_t SimpleSingleFitStream::getMaxHostMem(int flow_key, int flow_block_size)
{
  WorkSet Job( flow_key, flow_block_size );

  size_t ret = 0;

  if(GpuMultiFlowFitControl::doGPUTraceLevelXtalk()){
    ret += Job.padTo128Bytes(sizeof(ConstXtalkParams));
    ret += Job.getXtalkNeiIdxMapSize(true);
    ret += Job.getXtalkSampleNeiIdxMapSize(true);
  }
  ret += Job.padTo128Bytes(sizeof(ConstParams));
  ret += Job.getFgBufferSizeShort(true);
  ret += Job.getBeadParamsSize(true);
  ret += Job.getBeadStateSize(true);
  ret += Job.getDarkMatterSize(true);
  ret += Job.getShiftedBackgroundSize(true);
  ret += Job.getEmphVecSize(true); // crude emphasis
  ret += Job.getEmphVecSize(true); // fine emphasis
  ret += Job.getCoarseNucRiseSize(true);
  ret += Job.getFineNucRiseSize(true);

  return ret;
}

// Device scratch bytes matching the carving in resetPointers():
// 7 NxF float planes + one N*flow_block_size plane, plus xtalk extras.
size_t SimpleSingleFitStream::getScratchSpaceAllocSize(const WorkSet & Job)
{
  size_t ScratchSize = 0;
  ScratchSize += 7 * Job.getPaddedN() * Job.getNumFrames();
  ScratchSize += 1* Job.getPaddedN() * Job.getFlowBlockSize();
  if(GpuMultiFlowFitControl::doGPUTraceLevelXtalk()){
    ScratchSize += MAX_XTALK_NEIGHBOURS * Job.getPaddedN() * Job.getNumFrames();
    ScratchSize += MAX_XTALK_NEIGHBOURS * Job.getPaddedGenericXtalkSample();
    ScratchSize += Job.getPaddedGenericXtalkSample() * Job.getNumFrames();
  }
  ScratchSize *= sizeof(float);
  return  ScratchSize;
}

// Worst-case device memory for one job; numFrames/numBeads override the
// predefined maxima when positive.
size_t SimpleSingleFitStream::getMaxDeviceMem(
    int flow_key,
    int flow_block_size,
    int numFrames,
    int numBeads)
{
  WorkSet Job( flow_key, flow_block_size );

  // if numFrames/numBeads are passed overwrite the predefined maxFrames/maxBeads
  // for the size calculation
  if(numFrames >0) Job.setMaxFrames(numFrames);
  if(numBeads> 0) Job.setMaxBeads(numBeads);

  size_t ret = 0;

  ret = getScratchSpaceAllocSize(Job);
  ret += Job.getFgBufferSizeShort(true);
  ret += Job.getBeadParamsSize(true);
  ret += Job.getBeadStateSize(true);
  ret += Job.getDarkMatterSize(true);
  ret += Job.getShiftedBackgroundSize(true);
  ret += Job.getEmphVecSize(true); // crude emphasis
  ret += Job.getEmphVecSize(true); // fine emphasis
  ret += Job.getCoarseNucRiseSize(true);
  ret += Job.getFineNucRiseSize(true);
  ret += Job.getFgBufferSizeShort(true);
  ret += Job.getFgBufferSize(true);
  ret += Job.getBeadParamsSize(true);

  //cout << "getMAxDevice SingleFit N: " << N << "("<< Job.getPaddedN() <<") F: " << F << " ret: " << ret/(1024.0*1024) << endl;
  //std::cout << "====> mem for single fit: " << ret << " bytes" << std::endl;
  return ret;
}

void SimpleSingleFitStream::setBeadsPerBlock(int bpb)
{
  _bpb = bpb;
}

void SimpleSingleFitStream::setL1Setting(int type) // 0:sm=l1, 1:sm>l1, 2:sm<l1
{
  if( 0 <= type && type <= 2 ) _l1type = type;
}

void SimpleSingleFitStream::setHybridIter(int hybridIter)
{
  _hybriditer = hybridIter;
}

// Logs the current static launch-tuning settings.
void SimpleSingleFitStream::printSettings()
{
  cout << "CUDA: SingleFitStream SETTINGS: blocksize = " << _bpb << " l1setting = " ;
  switch(_l1type){
    case 0:
      cout << "cudaFuncCachePreferEqual" << endl;;
      break;
    case 1:
      cout << "cudaFuncCachePreferShared" <<endl;
      break;
    case 2:
      cout << "cudaFuncCachePreferL1" << endl;
      break;
    default:
      cout << "GPU specific default" << endl;
  }
}

void SimpleSingleFitStream::setFitType(int type) // 0:gauss newton, 1:lev mar
{
  _fittype = type;
}
the_stack
__constant__ uint32_t c_PaddedMessage80[20]; // padded message (80 bytes + padding?) static uint32_t *d_found[MAX_GPUS]; #define sM 16 #define O1 13 #define O2 9 #define O3 6 #define INPUT_BLOCK_ADD \ B0 = B0 + M0; \ B1 = B1 + M1; \ B2 = B2 + M2; \ B3 = B3 + M3; \ B4 = B4 + M4; \ B5 = B5 + M5; \ B6 = B6 + M6; \ B7 = B7 + M7; \ B8 = B8 + M8; \ B9 = B9 + M9; \ BA = BA + MA; \ BB = BB + MB; \ BC = BC + MC; \ BD = BD + MD; \ BE = BE + ME; \ BF = BF + MF; \ #define INPUT_BLOCK_SUB \ C0 = C0 - M0; \ C1 = C1 - M1; \ C2 = C2 - M2; \ C3 = C3 - M3; \ C4 = C4 - M4; \ C5 = C5 - M5; \ C6 = C6 - M6; \ C7 = C7 - M7; \ C8 = C8 - M8; \ C9 = C9 - M9; \ CA = CA - MA; \ CB = CB - MB; \ CC = CC - MC; \ CD = CD - MD; \ CE = CE - ME; \ CF = CF - MF; \ #define XOR_W \ A00 ^= Wlow; \ A01 ^= Whigh; \ #define SWAP(v1, v2) \ v1^=v2;\ v2 ^= v1;\ v1 ^= v2; #define SWAP_BC \ SWAP(B0, C0); \ SWAP(B1, C1); \ SWAP(B2, C2); \ SWAP(B3, C3); \ SWAP(B4, C4); \ SWAP(B5, C5); \ SWAP(B6, C6); \ SWAP(B7, C7); \ SWAP(B8, C8); \ SWAP(B9, C9); \ SWAP(BA, CA); \ SWAP(BB, CB); \ SWAP(BC, CC); \ SWAP(BD, CD); \ SWAP(BE, CE); \ SWAP(BF, CF); \ #define PERM_ELT(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) \ xa0 = ((xa0 \ ^ (ROTL32(xa1, 15) * 5U) \ ^ xc) * 3U) \ ^ xb1 ^ (xb2 & ~xb3) ^ xm; \ xb0 = (~(ROTL32(xb0, 1) ^ xa0)); \ #define PERM_STEP_0 \ PERM_ELT(A00, A0B, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A01, A00, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A02, A01, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A03, A02, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A04, A03, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A05, A04, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A06, A05, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A07, A06, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A08, A07, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A09, A08, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A0A, A09, BA, B7, B3, B0, CE, MA); \ PERM_ELT(A0B, A0A, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A00, A0B, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A01, A00, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A02, A01, BE, BB, B7, B4, CA, ME); 
\ PERM_ELT(A03, A02, BF, BC, B8, B5, C9, MF); \ #define PERM_STEP_1 \ PERM_ELT(A04, A03, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A05, A04, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A06, A05, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A07, A06, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A08, A07, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A09, A08, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A0A, A09, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A0B, A0A, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A00, A0B, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A01, A00, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A02, A01, BA, B7, B3, B0, CE, MA); \ PERM_ELT(A03, A02, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A04, A03, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A05, A04, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A06, A05, BE, BB, B7, B4, CA, ME); \ PERM_ELT(A07, A06, BF, BC, B8, B5, C9, MF); \ #define PERM_STEP_2 \ PERM_ELT(A08, A07, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A09, A08, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A0A, A09, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A0B, A0A, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A00, A0B, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A01, A00, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A02, A01, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A03, A02, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A04, A03, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A05, A04, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A06, A05, BA, B7, B3, B0, CE, MA); \ PERM_ELT(A07, A06, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A08, A07, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A09, A08, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A0A, A09, BE, BB, B7, B4, CA, ME); \ PERM_ELT(A0B, A0A, BF, BC, B8, B5, C9, MF); \ #define APPLY_P \ B0 = ROTL32(B0, 17); \ B1 = ROTL32(B1, 17); \ B2 = ROTL32(B2, 17); \ B3 = ROTL32(B3, 17); \ B4 = ROTL32(B4, 17); \ B5 = ROTL32(B5, 17); \ B6 = ROTL32(B6, 17); \ B7 = ROTL32(B7, 17); \ B8 = ROTL32(B8, 17); \ B9 = ROTL32(B9, 17); \ BA = ROTL32(BA, 17); \ BB = ROTL32(BB, 17); \ BC = ROTL32(BC, 17); \ BD = ROTL32(BD, 17); \ BE = ROTL32(BE, 17); \ BF = ROTL32(BF, 17); \ PERM_STEP_0; \ PERM_STEP_1; \ PERM_STEP_2; \ A0B 
= (A0B + C6); \ A0A = (A0A + C5); \ A09 = (A09 + C4); \ A08 = (A08 + C3); \ A07 = (A07 + C2); \ A06 = (A06 + C1); \ A05 = (A05 + C0); \ A04 = (A04 + CF); \ A03 = (A03 + CE); \ A02 = (A02 + CD); \ A01 = (A01 + CC); \ A00 = (A00 + CB); \ A0B = (A0B + CA); \ A0A = (A0A + C9); \ A09 = (A09 + C8); \ A08 = (A08 + C7); \ A07 = (A07 + C6); \ A06 = (A06 + C5); \ A05 = (A05 + C4); \ A04 = (A04 + C3); \ A03 = (A03 + C2); \ A02 = (A02 + C1); \ A01 = (A01 + C0); \ A00 = (A00 + CF); \ A0B = (A0B + CE); \ A0A = (A0A + CD); \ A09 = (A09 + CC); \ A08 = (A08 + CB); \ A07 = (A07 + CA); \ A06 = (A06 + C9); \ A05 = (A05 + C8); \ A04 = (A04 + C7); \ A03 = (A03 + C6); \ A02 = (A02 + C5); \ A01 = (A01 + C4); \ A00 = (A00 + C3); \ #define APPLY_P_FINAL \ B0 = ROTL32(B0, 17); \ B1 = ROTL32(B1, 17); \ B2 = ROTL32(B2, 17); \ B3 = ROTL32(B3, 17); \ B4 = ROTL32(B4, 17); \ B5 = ROTL32(B5, 17); \ B6 = ROTL32(B6, 17); \ B7 = ROTL32(B7, 17); \ B8 = ROTL32(B8, 17); \ B9 = ROTL32(B9, 17); \ BA = ROTL32(BA, 17); \ BB = ROTL32(BB, 17); \ BC = ROTL32(BC, 17); \ BD = ROTL32(BD, 17); \ BE = ROTL32(BE, 17); \ BF = ROTL32(BF, 17); \ PERM_STEP_0; \ PERM_STEP_1; \ PERM_STEP_2; \ #define INCR_W if ((Wlow = (Wlow + 1)) == 0) \ Whigh = (Whigh + 1); \ static __device__ void axiom_shabal256_gpu_hash_64(uint32_t *g_hash) { const uint32_t A_init_256[] = { 0x52F84552, 0xE54B7999, 0x2D8EE3EC, 0xB9645191, 0xE0078B86, 0xBB7C44C9, 0xD2B5C1CA, 0xB0D2EB8C, 0x14CE5A45, 0x22AF50DC, 0xEFFDBC6B, 0xEB21B74A }; const uint32_t B_init_256[] = { 0xB555C6EE, 0x3E710596, 0xA72A652F, 0x9301515F, 0xDA28C1FA, 0x696FD868, 0x9CB6BF72, 0x0AFE4002, 0xA6E03615, 0x5138C1D4, 0xBE216306, 0xB38B8890, 0x3EA8B96B, 0x3299ACE4, 0x30924DD4, 0x55CB34A5 }; const uint32_t C_init_256[] = { 0xB405F031, 0xC4233EBA, 0xB3733979, 0xC0DD9D55, 0xC51C28AE, 0xA327B8E1, 0x56C56167, 0xED614433, 0x88B59D60, 0x60E2CEBA, 0x758B4B8B, 0x83E82A7F, 0xBC968828, 0xE6E00BF7, 0xBA839E55, 0x9B491C60 }; uint32_t *Hash = &g_hash[0]; // [hashPosition * 8] uint32_t A00 = 
A_init_256[0], A01 = A_init_256[1], A02 = A_init_256[2], A03 = A_init_256[3], A04 = A_init_256[4], A05 = A_init_256[5], A06 = A_init_256[6], A07 = A_init_256[7], A08 = A_init_256[8], A09 = A_init_256[9], A0A = A_init_256[10], A0B = A_init_256[11]; uint32_t B0 = B_init_256[0], B1 = B_init_256[1], B2 = B_init_256[2], B3 = B_init_256[3], B4 = B_init_256[4], B5 = B_init_256[5], B6 = B_init_256[6], B7 = B_init_256[7], B8 = B_init_256[8], B9 = B_init_256[9], BA = B_init_256[10], BB = B_init_256[11], BC = B_init_256[12], BD = B_init_256[13], BE = B_init_256[14], BF = B_init_256[15]; uint32_t C0 = C_init_256[0], C1 = C_init_256[1], C2 = C_init_256[2], C3 = C_init_256[3], C4 = C_init_256[4], C5 = C_init_256[5], C6 = C_init_256[6], C7 = C_init_256[7], C8 = C_init_256[8], C9 = C_init_256[9], CA = C_init_256[10], CB = C_init_256[11], CC = C_init_256[12], CD = C_init_256[13], CE = C_init_256[14], CF = C_init_256[15]; uint32_t M0, M1, M2, M3, M4, M5, M6, M7, M8, M9, MA, MB, MC, MD, ME, MF; M0 = Hash[0]; M1 = Hash[1]; M2 = Hash[2]; M3 = Hash[3]; M4 = Hash[4]; M5 = Hash[5]; M6 = Hash[6]; M7 = Hash[7]; M8 = 0; M9 = 0; MA = 0; MB = 0; MC = 0; MD = 0; ME = 0; MF = 0; INPUT_BLOCK_ADD; A00 ^= 1; APPLY_P; INPUT_BLOCK_SUB; SWAP_BC; M0 = 0x80; M1 = M2 = M3 = M4 = M5 = M6 = M7 = M8 = M9 = MA = MB = MC = MD = ME = MF = 0; INPUT_BLOCK_ADD; A00 ^= 2; APPLY_P; SWAP_BC; A00 ^= 2; APPLY_P; SWAP_BC; A00 ^= 2; APPLY_P; SWAP_BC; A00 ^= 2; APPLY_P_FINAL; Hash[0] = B0; Hash[1] = B1; Hash[2] = B2; Hash[3] = B3; Hash[4] = B4; Hash[5] = B5; Hash[6] = B6; Hash[7] = B7; } __host__ void axiom_setBlock_80(void *pdata) { unsigned char PaddedMessage[4*8]; memcpy(PaddedMessage, pdata, 4 * 8); cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 4 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice); } __global__ __launch_bounds__(256, 1) void axiom_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t target, uint32_t *d_found) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) 
{ int N = 65536; uint32_t hash[65536][8]; int R = 2; uint32_t nounce = (startNounce + thread); hash[0][0] = c_PaddedMessage80[0]; hash[0][1] = c_PaddedMessage80[1]; hash[0][2] = c_PaddedMessage80[2]; hash[0][3] = c_PaddedMessage80[3]; hash[0][4] = c_PaddedMessage80[4]; hash[0][5] = c_PaddedMessage80[5]; hash[0][6] = c_PaddedMessage80[6]; hash[0][7] = c_PaddedMessage80[7] ^ nounce; for (int i = 0; i < N-1; i++) { axiom_shabal256_gpu_hash_64(&hash[i][0]); hash[i + 1][0] = hash[i][0]; hash[i + 1][1] = hash[i][1]; hash[i + 1][2] = hash[i][2]; hash[i + 1][3] = hash[i][3]; hash[i + 1][4] = hash[i][4]; hash[i + 1][5] = hash[i][5]; hash[i + 1][6] = hash[i][6]; hash[i + 1][7] = hash[i][7]; } axiom_shabal256_gpu_hash_64(&hash[N-1][0]); for (int r = 1; r < R; r++) { for (int b = 0; b < N; b++) { int p = b > 0 ? b - 1 : N - 1; int q = hash[p][0] % (N - 1); int j = (b + q) % N; axiom_shabal256_gpu_hash_64(&hash[p][0]); axiom_shabal256_gpu_hash_64(&hash[j][0]); } } if (hash[N - 1][7] <= target) { uint32_t tmp = atomicExch(&(d_found[0]), nounce); if (tmp != 0xffffffff) d_found[1] = tmp; } } } __host__ void axiom_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t target, uint32_t *h_found) { const uint32_t threadsperblock = 1; // berechne wie viele Thread Blocks wir brauchen dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); cudaMemset(d_found[thr_id], 0xffffffff, 2 * sizeof(uint32_t)); axiom_gpu_hash_80 << <grid, block >> >(threads, startNounce, target,d_found[thr_id] ); cudaMemcpy(h_found, d_found[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); }
the_stack
const int CUDA_NUM_THREADS = 128; const int BLOCK_SIZE_LIMIT = 32768; // CUDA: number of blocks for threads. inline int GET_BLOCKS(const int N) { int ret = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; return (ret > BLOCK_SIZE_LIMIT) ? BLOCK_SIZE_LIMIT : ret; } __global__ void cf_kernel(V_ID rowLeft, V_ID rowRight, E_ID colLeft, const NodeStruct* row_ptrs, const EdgeStruct* col_idxs, Vertex* old_pr_fb, Vertex* new_pr_fb) { typedef cub::BlockScan<E_ID, CUDA_NUM_THREADS> BlockScan; __shared__ BlockScan::TempStorage temp_storage; __shared__ float vec[CUDA_NUM_THREADS * K]; __shared__ float accErr[CUDA_NUM_THREADS * K]; __shared__ float srcVec[CUDA_NUM_THREADS * K]; __shared__ EdgeStruct es[CUDA_NUM_THREADS]; __shared__ E_ID blkColStart; for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft; blkRowStart <= rowRight; blkRowStart += blockDim.x * gridDim.x) { E_ID myNumEdges = 0, scratchOffset, totalNumEdges = 0; V_ID curVtx = blkRowStart + threadIdx.x; if (curVtx <= rowRight) { NodeStruct ns = row_ptrs[curVtx - rowLeft]; E_ID start_col_idx, end_col_idx = ns.index; if (curVtx == rowLeft) start_col_idx = colLeft; else start_col_idx = row_ptrs[curVtx - rowLeft - 1].index; myNumEdges = end_col_idx - start_col_idx; if (threadIdx.x == 0) blkColStart = start_col_idx; for (int i = 0; i < K; i++) { vec[threadIdx.x * K + i] = old_pr_fb[curVtx - rowLeft].v[i]; accErr[threadIdx.x * K + i] = 0; } } __syncthreads(); BlockScan(temp_storage).ExclusiveSum(myNumEdges, scratchOffset, totalNumEdges); E_ID done = 0; while (totalNumEdges > 0) { if (threadIdx.x < totalNumEdges) { es[threadIdx.x] = col_idxs[blkColStart + done + threadIdx.x - colLeft]; __syncthreads(); int blksize = totalNumEdges > CUDA_NUM_THREADS ? 
CUDA_NUM_THREADS : totalNumEdges; for (int i = 0; i < K; i++) { srcVec[i * blksize + threadIdx.x] = old_pr_fb[es[(i * blksize + threadIdx.x) / K].src].v[threadIdx.x % K]; } __syncthreads(); int dst_tid = es[threadIdx.x].dst - blkRowStart; float dotProd = 0.0f; for (int i = 0; i < K; i++) dotProd += srcVec[threadIdx.x * K + i] * vec[dst_tid * K + i]; float err = es[threadIdx.x].weight - dotProd; for (int i = 0; i < K; i++) accErr[dst_tid * K + i] += err * srcVec[threadIdx.x * K + i]; } done += CUDA_NUM_THREADS; totalNumEdges -= (totalNumEdges > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : totalNumEdges; } __syncthreads(); if (curVtx <= rowRight) { for (int i = 0; i < K; i++) { int offset = threadIdx.x * K + i; new_pr_fb[curVtx].v[i] = vec[offset] + GAMMA * (accErr[offset] - LAMBDA * vec[offset]); } } } } void pull_app_task_impl(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 5); assert(task->regions.size() == 5); const GraphPiece *piece = (GraphPiece*) task->local_args; const AccessorRO<NodeStruct, 1> acc_row_ptr(regions[0], FID_DATA); const AccessorRO<V_ID, 1> acc_in_vtx(regions[1], FID_DATA); const AccessorRO<EdgeStruct, 1> acc_col_idx(regions[2], FID_DATA); const AccessorRO<Vertex, 1> acc_old_pr(regions[3], FID_DATA); const AccessorWO<Vertex, 1> acc_new_pr(regions[4], FID_DATA); Rect<1> rect_row_ptr = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<1> rect_in_vtx = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<1> rect_col_idx = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<1> rect_old_pr = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<1> rect_new_pr = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); assert(acc_row_ptr.accessor.is_dense_arbitrary(rect_row_ptr)); 
assert(acc_in_vtx.accessor.is_dense_arbitrary(rect_in_vtx)); assert(acc_col_idx.accessor.is_dense_arbitrary(rect_col_idx)); assert(acc_old_pr.accessor.is_dense_arbitrary(rect_old_pr)); assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr)); const NodeStruct* row_ptrs = acc_row_ptr.ptr(rect_row_ptr); const V_ID* in_vtxs = acc_in_vtx.ptr(rect_in_vtx); const EdgeStruct* col_idxs = acc_col_idx.ptr(rect_col_idx); const Vertex* old_pr = acc_old_pr.ptr(rect_old_pr); Vertex* new_pr = acc_new_pr.ptr(rect_new_pr); V_ID rowLeft = rect_row_ptr.lo[0], rowRight = rect_row_ptr.hi[0]; E_ID colLeft = rect_col_idx.lo[0], colRight = rect_col_idx.hi[0]; //cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); checkCUDA(cudaMemcpy(piece->oldPrFb, old_pr, size_t(piece->nv) * sizeof(Vertex), cudaMemcpyHostToDevice)); cf_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, row_ptrs, col_idxs, piece->oldPrFb, piece->newPrFb); // Need to copy results back to new_pr cudaDeviceSynchronize(); checkCUDA(cudaMemcpy(new_pr, piece->newPrFb, (rowRight - rowLeft + 1) * sizeof(Vertex), cudaMemcpyDeviceToHost)); } __global__ void init_kernel(V_ID rowLeft, V_ID rowRight, E_ID colLeft, NodeStruct* row_ptrs, EdgeStruct* col_idxs, const E_ID* raw_rows, const V_ID* raw_cols, const WeightType* raw_weights) { for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x; n + rowLeft <= rowRight; n += blockDim.x * gridDim.x) { V_ID curVtx = n + rowLeft; E_ID startColIdx, endColIdx = raw_rows[n]; if (n == 0) startColIdx = colLeft; else startColIdx = raw_rows[n - 1]; row_ptrs[n].index = endColIdx; for (E_ID e = startColIdx; e < endColIdx; e++) { col_idxs[e - colLeft].src = raw_cols[e - colLeft]; col_idxs[e - colLeft].dst = curVtx; col_idxs[e - colLeft].weight = raw_weights[e - colLeft]; } } } GraphPiece pull_init_task_impl(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifdef VERTEX_DEGREE assert(false); #endif #ifndef 
EDGE_WEIGHT assert(false); #endif assert(regions.size() == 7); assert(task->regions.size() == 7); const Graph *graph = (Graph*) task->args; const AccessorWO<NodeStruct, 1> acc_row_ptr(regions[0], FID_DATA); const AccessorWO<V_ID, 1> acc_in_vtx(regions[1], FID_DATA); const AccessorWO<EdgeStruct, 1> acc_col_idx(regions[2], FID_DATA); const AccessorWO<Vertex, 1> acc_new_pr(regions[3], FID_DATA); const AccessorRO<E_ID, 1> acc_raw_rows(regions[4], FID_DATA); const AccessorRO<V_ID, 1> acc_raw_cols(regions[5], FID_DATA); Rect<1> rect_row_ptr = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<1> rect_in_vtx = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<1> rect_col_idx = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<1> rect_new_pr = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<1> rect_raw_rows = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); Rect<1> rect_raw_cols = runtime->get_index_space_domain( ctx, task->regions[5].region.get_index_space()); assert(acc_row_ptr.accessor.is_dense_arbitrary(rect_row_ptr)); assert(acc_in_vtx.accessor.is_dense_arbitrary(rect_in_vtx)); assert(acc_col_idx.accessor.is_dense_arbitrary(rect_col_idx)); assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr)); assert(acc_raw_rows.accessor.is_dense_arbitrary(rect_raw_rows)); assert(acc_raw_cols.accessor.is_dense_arbitrary(rect_raw_cols)); NodeStruct* row_ptrs = acc_row_ptr.ptr(rect_row_ptr); V_ID* in_vtxs = acc_in_vtx.ptr(rect_in_vtx); EdgeStruct* col_idxs = acc_col_idx.ptr(rect_col_idx); Vertex* new_pr = acc_new_pr.ptr(rect_new_pr); const E_ID* raw_rows = acc_raw_rows.ptr(rect_raw_rows); const V_ID* raw_cols = acc_raw_cols.ptr(rect_raw_cols); V_ID rowLeft = rect_row_ptr.lo[0], rowRight = rect_row_ptr.hi[0]; E_ID colLeft = rect_col_idx.lo[0], colRight = rect_col_idx.hi[0]; std::vector<V_ID> 
edges(colRight - colLeft + 1); for (E_ID e = 0; e < colRight - colLeft + 1; e++) edges[e] = raw_cols[e]; std::sort(edges.begin(), edges.end()); V_ID curVtx = edges[0], myInVtx = 0; for (E_ID e = 0; e < colRight - colLeft + 1; e++) { if (curVtx != edges[e]) { edges[myInVtx++] = curVtx; curVtx = edges[e]; } } edges[myInVtx++] = curVtx; checkCUDA(cudaMemcpy(in_vtxs, edges.data(), sizeof(V_ID) * myInVtx, cudaMemcpyHostToDevice)); // Add raw_weights if regions.size() == 7 const WeightType* raw_weights = NULL; if (regions.size() == 7) { const AccessorRO<WeightType, 1> acc_raw_weights(regions[6], FID_DATA); Rect<1> rect_raw_weights = runtime->get_index_space_domain( ctx, task->regions[6].region.get_index_space()); assert(rect_raw_weights == rect_raw_cols); assert(acc_raw_weights.accessor.is_dense_arbitrary(rect_raw_weights)); raw_weights = acc_raw_weights.ptr(rect_raw_weights.lo); } init_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, row_ptrs, col_idxs, raw_rows, raw_cols, raw_weights); checkCUDA(cudaDeviceSynchronize()); float value = std::sqrt(1.0f / K); for (V_ID n = 0; n + rowLeft <= rowRight; n++) { for (int i = 0; i < K; i++) new_pr[n].v[i] = value; } GraphPiece piece; piece.myInVtxs = myInVtx; piece.nv = graph->nv; piece.ne = graph->ne; // Allocate oldPrFb/newPrFb on the same memory as row_ptr std::set<Memory> memFB; regions[0].get_memories(memFB); assert(memFB.size() == 1); assert(memFB.begin()->kind() == Memory::GPU_FB_MEM); Realm::MemoryImpl* memImpl = Realm::get_runtime()->get_memory_impl(*memFB.begin()); Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; off_t offset = memFBImpl->alloc_bytes(sizeof(Vertex) * graph->nv); assert(offset >= 0); piece.oldPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0); offset = memFBImpl->alloc_bytes(sizeof(Vertex) * (rowRight - rowLeft + 1)); assert(offset >= 0); piece.newPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0); 
//checkCUDA(cudaMalloc(&(piece.oldPrFb), sizeof(float) * graph->nv)); //checkCUDA(cudaMalloc(&(piece.newPrFb), sizeof(float) * (rowRight-rowLeft+1))); return piece; }
the_stack
#include <cublas_v2.h> #include <cuda_runtime.h> #include "buffalo/cuda/utils.cuh" #include "buffalo/cuda/bpr/bpr.hpp" namespace cuda_bpr{ using std::invalid_argument; using namespace cuda_buffalo; using namespace thrust; static const float MAX_EXP = 6; __global__ void init_rngs_kernel(default_random_engine* rngs, int rand_seed){ rngs[blockIdx.x].seed(blockIdx.x + rand_seed); } __global__ void fill_rows_kernel(const int start_x, const int next_x, const int64_t* indptr, int* rows){ int64_t shift = start_x == 0? 0: indptr[start_x - 1]; for(int user=start_x+blockIdx.x; user<next_x; user+=gridDim.x){ size_t beg = user == 0? 0: indptr[user - 1] - shift; size_t end = indptr[user] - shift; for(size_t idx=beg; idx<end; ++idx) rows[idx] = user; // TODO make faster by sorting keys and using binary searching when verifying negative sample // sort(thrust::device, keys+beg, keys+end); } } __global__ void generate_samples_kernel(const int start_x, const int next_x, int* user, int* pos, int* neg, const int64_t* indptr, const float* dist, const int* rows, const int* keys, const int num_items, const size_t sample_size, const int num_neg_samples, const bool uniform_dist, const bool verify_neg, default_random_engine* rngs, const bool random_positive){ // prepare sampling int64_t beg = start_x == 0? 
0: indptr[start_x - 1]; int64_t end = indptr[next_x - 1]; float Z = dist[num_items - 1]; // set random generator default_random_engine& rng = rngs[blockIdx.x]; uniform_int_distribution<int> item_dist1(0, num_items-1); // item distribution in case of uniform sampling uniform_real_distribution<float> item_dist2(0.0, Z); // item distribution in case of multinomial sampling uniform_int_distribution<int64_t> pos_dist(0, end-beg-1); // positive sampler for(int64_t s=blockIdx.x; s<(sample_size*num_neg_samples); s+=gridDim.x){ int64_t idx; if (random_positive) idx = pos_dist(rng); // draw positive index else idx = s % sample_size; // straight-forward positive index int _user = rows[idx]; // find user int _pos = keys[idx]; // get postive key // indexes for finding positive keys of target user size_t _beg = _user == 0? 0: indptr[_user - 1] - beg; size_t _end = indptr[_user] - beg; // negative sampling int _neg; while (true){ if (uniform_dist){ _neg = item_dist1(rng); } else{ float r = item_dist2(rng); _neg = upper_bound(thrust::device, dist, dist+num_items-1, r) - dist; } if (not verify_neg) break; bool exit = true; for(size_t _idx=_beg; _idx<_end; ++_idx){ if (_neg == keys[_idx]){ exit = false; break; } } if (exit) break; /* TODO make faster by sorting keys and using binary searching when verifying negative sample if (not binary_search(thrust::device, keys+_beg, keys+_end, _neg)) break; */ } // save samples user[s] = _user; pos[s] = _pos; neg[s] = _neg; } } __global__ void update_bpr_kernel(const int dim, const int vdim, float* P, float* Q, float* Qb, float* loss, const int* user, const int* pos, const int* neg, const size_t sample_size, const float lr, const bool compute_loss, const float reg_u, const float reg_i, const float reg_j, const float reg_b, const bool update_i, const bool update_j, const bool use_bias){ for (size_t s=blockIdx.x; s<sample_size; s+=gridDim.x){ // take a sample const int _user = user[s], _pos = pos[s], _neg = neg[s]; // target parameter vectors 
float* _P = P + vdim * _user; float* _Qp = Q + vdim * _pos; float* _Qn = Q + vdim * _neg; // compute scores float pos_score = dot(_P, _Qp); float neg_score = dot(_P, _Qn); if (use_bias){ pos_score += Qb[_pos]; neg_score += Qb[_neg]; } // prepare computing gradient float diff = neg_score - pos_score; diff = max(min(diff, MAX_EXP), -MAX_EXP); float e = expf(diff); float logit = e / (1 + e); // compute loss if (compute_loss and threadIdx.x == 0) loss[blockIdx.x] += logf(1 + e); if (threadIdx.x < dim){ // save parameter temporarily before update float tmp = _P[threadIdx.x]; //update user atomicAdd(_P + threadIdx.x, lr * (logit * (_Qp[threadIdx.x] - _Qn[threadIdx.x]) - reg_u * tmp)); //update item if (update_i) atomicAdd(_Qp + threadIdx.x, lr * (logit * tmp - reg_i * _Qp[threadIdx.x])); if (update_j) atomicAdd(_Qn + threadIdx.x, lr * (-logit * tmp - reg_j * _Qn[threadIdx.x])); } //update item bias if (threadIdx.x == 0 and use_bias){ if (update_i) atomicAdd(Qb + _pos, lr * (logit - reg_b * Qb[_pos])); if (update_j) atomicAdd(Qb + _neg, lr * (-logit - reg_b * Qb[_neg])); } __syncthreads(); } } __global__ void compute_bpr_sample_loss_kernel(const int dim, const int vdim, float* P, float* Q, float* Qb, float* loss, const int* user, const int* pos, const int* neg, const size_t sample_size, const bool use_bias){ for (size_t s=blockIdx.x; s<sample_size; s+=gridDim.x){ // take a sample const int _user = user[s], _pos = pos[s], _neg = neg[s]; // target parameter vectors float* _P = P + vdim * _user; float* _Qp = Q + vdim * _pos; float* _Qn = Q + vdim * _neg; // compute scores float pos_score = dot(_P, _Qp); float neg_score = dot(_P, _Qn); if (use_bias){ pos_score += Qb[_pos]; neg_score += Qb[_neg]; } float diff = neg_score - pos_score; diff = max(min(diff, MAX_EXP), -MAX_EXP); float e = expf(diff); // compute loss if (threadIdx.x == 0) loss[blockIdx.x] += logf(1 + e); __syncthreads(); } } CuBPR::CuBPR(){ logger_ = BuffaloLogger().get_logger(); num_processed_ = 0; 
CHECK_CUDA(cudaGetDevice(&devId_)); cudaDeviceProp prop; CHECK_CUDA(cudaGetDeviceProperties(&prop, devId_)); mp_cnt_ = prop.multiProcessorCount; int major = prop.major; int minor = prop.minor; cores_ = -1; switch (major){ case 2: // Fermi if (minor == 1) cores_ = mp_cnt_ * 48; else cores_ = mp_cnt_ * 32; break; case 3: // Kepler cores_ = mp_cnt_ * 192; break; case 5: // Maxwell cores_ = mp_cnt_ * 128; break; case 6: // Pascal if (minor == 1) cores_ = mp_cnt_ * 128; else if (minor == 0) cores_ = mp_cnt_ * 64; else INFO0("Unknown device type"); break; case 7: // Volta if (minor == 0) cores_ = mp_cnt_ * 64; else INFO0("Unknown device type"); break; default: INFO0("Unknown device type"); break; } if (cores_ == -1) cores_ = mp_cnt_ * 128; INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}", major, minor, mp_cnt_, cores_); } CuBPR::~CuBPR(){ hostP_ = nullptr; hostQ_ = nullptr; hostQb_ = nullptr; } bool CuBPR::parse_option(std::string opt_path, Json& j){ std::ifstream in(opt_path.c_str()); if (not in.is_open()) { return false; } std::string str((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>()); std::string err_cmt; auto _j = Json::parse(str, err_cmt); if (not err_cmt.empty()) { return false; } j = _j; return true; } bool CuBPR::init(std::string opt_path){ // parse options bool ok = parse_option(opt_path, opt_); if (ok){ // set options compute_loss_ = opt_["compute_loss_on_training"].bool_value(); dim_ = opt_["d"].int_value(); num_iters_ = opt_["num_iters"].int_value(); reg_u_ = opt_["reg_u"].number_value(); reg_i_ = opt_["reg_i"].number_value(); reg_j_ = opt_["reg_j"].number_value(); reg_b_ = opt_["reg_b"].number_value(); lr_ = opt_["lr"].number_value(); min_lr_ = opt_["min_lr"].number_value(); update_i_ = opt_["update_i"].bool_value(); update_j_ = opt_["update_j"].bool_value(); use_bias_ = opt_["use_bias"].bool_value(); rand_seed_ = opt_["rand_seed"].int_value(); random_positive_ = opt_["random_positive"].bool_value(); // 
virtual dimension vdim_ = (dim_ / WARP_SIZE) * WARP_SIZE; if (dim_ % WARP_SIZE > 0) vdim_ += WARP_SIZE; // set block count block_cnt_ = opt_["hyper_threads"].int_value() * (cores_ / vdim_); // get sampling option uniform_dist_ = opt_["sampling_power"].number_value() == 0.0; num_neg_samples_ = opt_["num_negative_samples"].int_value(); verify_neg_ = opt_["verify_neg"].bool_value(); if (compute_loss_){ hostLoss_.resize(block_cnt_); devLoss_.resize(block_cnt_); } rngs_.resize(block_cnt_ * vdim_); init_rngs_kernel<<<block_cnt_*vdim_, 1>>>(raw_pointer_cast(rngs_.data()), rand_seed_); } return ok; } void CuBPR::initialize_model( float* P, int P_rows, float* Q, float* Qb, int Q_rows, int64_t num_total_process, bool set_gpu) { // initialize parameters and send to gpu memory hostP_ = P; hostQ_ = Q; hostQb_ = Qb; P_rows_ = P_rows; Q_rows_ = Q_rows; num_total_process_ = num_total_process; if (set_gpu){ devP_.resize(P_rows_*vdim_); devQ_.resize(Q_rows_*vdim_); devQb_.resize(Q_rows_); copy(hostP_, hostP_+P_rows*vdim_, devP_.begin()); copy(hostQ_, hostQ_+Q_rows*vdim_, devQ_.begin()); copy(hostQb_, hostQb_+Q_rows, devQb_.begin()); } } void CuBPR::set_placeholder(int64_t* indptr, size_t batch_size) { indptr_.resize(P_rows_); copy(indptr, indptr+P_rows_, indptr_.begin()); rows_.resize(batch_size); keys_.resize(batch_size); user_.resize(batch_size*num_neg_samples_); pos_.resize(batch_size*num_neg_samples_); neg_.resize(batch_size*num_neg_samples_); CHECK_CUDA(cudaDeviceSynchronize()); } void CuBPR::set_cumulative_table(int64_t* sampling_table) { dist_.resize(Q_rows_); // CHECK_CUDA(cudaDeviceSynchronize()); copy(sampling_table, sampling_table+Q_rows_, dist_.begin()); } void CuBPR::synchronize(bool device_to_host) { // synchronize parameters between cpu memory and gpu memory if (device_to_host){ copy(devP_.begin(), devP_.end(), hostP_); copy(devQ_.begin(), devQ_.end(), hostQ_); copy(devQb_.begin(), devQb_.end(), hostQb_); } else{ copy(hostP_, hostP_+P_rows_*vdim_, devP_.begin()); 
copy(hostQ_, hostQ_+Q_rows_*vdim_, devQ_.begin()); copy(hostQb_, hostQb_+Q_rows_, devQb_.begin()); } CHECK_CUDA(cudaDeviceSynchronize()); } int CuBPR::get_vdim(){ return vdim_; } std::pair<double, double> CuBPR::partial_update(int start_x, int next_x, int64_t* indptr, int* keys){ double el; time_p beg_t, end_t; // copy data to gpu memory int64_t beg = start_x == 0? 0: indptr[start_x - 1]; int64_t end = indptr[next_x - 1]; size_t sample_size = end - beg; copy(keys, keys+sample_size, keys_.begin()); // set zeros for measuring losses if (compute_loss_) fill(devLoss_.begin(), devLoss_.end(), 0.0); CHECK_CUDA(cudaDeviceSynchronize()); beg_t = get_now(); // generate samples fill_rows_kernel<<<block_cnt_*vdim_, 1>>>(start_x, next_x, raw_pointer_cast(indptr_.data()), raw_pointer_cast(rows_.data())); CHECK_CUDA(cudaDeviceSynchronize()); end_t = get_now(); el = (GetTimeDiff(beg_t, end_t)) * 1000.0; TRACE("elapsed for filling rows: {} ms", el); beg_t = get_now(); // generate samples generate_samples_kernel<<<block_cnt_*vdim_, 1>>>(start_x, next_x, raw_pointer_cast(user_.data()), raw_pointer_cast(pos_.data()), raw_pointer_cast(neg_.data()), raw_pointer_cast(indptr_.data()), raw_pointer_cast(dist_.data()), raw_pointer_cast(rows_.data()), raw_pointer_cast(keys_.data()), Q_rows_, sample_size, num_neg_samples_, uniform_dist_, verify_neg_, raw_pointer_cast(rngs_.data()), random_positive_); CHECK_CUDA(cudaDeviceSynchronize()); end_t = get_now(); el = (GetTimeDiff(beg_t, end_t)) * 1000.0; TRACE("elapsed for sampling: {} ms", el); beg_t = get_now(); // decay lr double progressed = (double) num_processed_ / ((double) num_total_process_ * (double) num_iters_); double alpha = lr_ + (min_lr_ - lr_) * progressed; alpha = fmax(min_lr_, alpha); // update bpr update_bpr_kernel<<<block_cnt_, vdim_>>>(dim_, vdim_, raw_pointer_cast(devP_.data()), raw_pointer_cast(devQ_.data()), raw_pointer_cast(devQb_.data()), raw_pointer_cast(devLoss_.data()), raw_pointer_cast(user_.data()), 
raw_pointer_cast(pos_.data()), raw_pointer_cast(neg_.data()), sample_size*num_neg_samples_, alpha, compute_loss_, reg_u_, reg_i_, reg_j_, reg_b_, update_i_, update_j_, use_bias_); CHECK_CUDA(cudaDeviceSynchronize()); num_processed_ += sample_size; end_t = get_now(); el = (GetTimeDiff(beg_t, end_t)) * 1000.0; TRACE("elapsed for update: {} ms", el); // accumulate losses double loss = 0; if (compute_loss_){ copy(devLoss_.begin(), devLoss_.end(), hostLoss_.begin()); for (auto l: hostLoss_) loss += l; } CHECK_CUDA(cudaDeviceSynchronize()); return std::make_pair(loss, sample_size*num_neg_samples_); } double CuBPR::compute_loss(int num_loss_samples, int* user, int* pos, int* neg){ fill(devLoss_.begin(), devLoss_.end(), 0.0); CHECK_CUDA(cudaDeviceSynchronize()); // copy sample data to gpu memory copy(user, user+num_loss_samples, user_.begin()); copy(pos, pos+num_loss_samples, pos_.begin()); copy(neg, neg+num_loss_samples, neg_.begin()); // update bpr compute_bpr_sample_loss_kernel<<<block_cnt_, vdim_>>>(dim_, vdim_, raw_pointer_cast(devP_.data()), raw_pointer_cast(devQ_.data()), raw_pointer_cast(devQb_.data()), raw_pointer_cast(devLoss_.data()), raw_pointer_cast(user_.data()), raw_pointer_cast(pos_.data()), raw_pointer_cast(neg_.data()), num_loss_samples, use_bias_); CHECK_CUDA(cudaDeviceSynchronize()); // accumulate losses double loss = 0; copy(devLoss_.begin(), devLoss_.end(), hostLoss_.begin()); for (auto l: hostLoss_) loss += l; CHECK_CUDA(cudaDeviceSynchronize()); return loss / (double) num_loss_samples; } } // namespace cuda_bpr
the_stack
//#include "cuda_runtime.h"
#include "cuda_error.h"
#include "Mask.h"
#include "Image.h"
#include "TimeCompression.h"
#include "Region.h"
#include "BeadTracker.h"
#include "BkgTrace.h"
#include "EnumDefines.h"
#include "SingleFlowFitKernels.h"
#include "UtilKernels.h"
#include "ConstantSymbolDeclare.h"

using namespace std;

#define COLLECT_SAMPLES
#define SAMPLES_IN_ORDER

// Computes the DC (baseline) offset of a time-compressed trace as the
// count-weighted average of the samples whose frame time lies in
// (t_start, t_end), with fractional weights for the frames straddling the
// window edges. frameStride is unused in the live code path (only the
// commented-out subtraction loop used it).
template<typename T>
__device__ inline float ComputeDcOffsetForCompressedTrace_v2 (
    const T * fgBufferLocal,
    const size_t frameStride,
    const float* frameNumber,
    const float t_start,
    const float t_end,
    const int numFrames)//int flow_max)
{
  // re-zero the traces
  //
  // Identical in output to above function, just extracts logic
  // from the inner loop to make it faster
  int pt;
  int start_pt=0;
  int end_pt=0;
  float cnt;
  float dc_zero=0.0f;// = ComputeDcOffset(fgPtr, t_start, t_end);
  float overhang_start=0.0f;
  float overhang_end=0.0f;
  int overhang_start_pt=1;
  int overhang_end_pt=1;
  // Timer tmr;
  /*ComputeDcOffset_params( frameNumber, numFrames, t_start, t_end, start_pt, end_pt, cnt, overhang_start, overhang_end); */
  //only needs to be done once maybe global object with locking mechanism?
  // cnt starts slightly above 0 so the final divide never divides by zero
  cnt = 0.0001f;
  start_pt = -1;
  end_pt = 0;
  overhang_start = 0.0f;
  overhang_end = 0.0f;
  // TODO: is this really "rezero frames before pH step start?"
  // this should be compatible with i_start from the nuc rise - which may change if we change the shape???
  // NOTE(review): loop has no pt < numFrames guard; it relies on frameNumber
  // containing an entry >= t_end before the end of the array — confirm.
  for (pt = 0; frameNumber[pt] < t_end; pt++)
  {
    end_pt = pt + 1;
    if (frameNumber[pt] > t_start)
    {
      if (start_pt == -1)
        start_pt = pt; // set to first point above t_start
      cnt += 1.0f; // should this be frames_per_point????
    }
  }

  if (start_pt < 0)
    start_pt = 0; // make sure we don't index incorrectly
  // include values surrounding t_start & t_end weighted by overhang
  else
  {
    // This part is really broken. Fixing it makes things worse??
    // the fraction overhang_start is > 1
    int ohstart_pt = start_pt ? start_pt : 1;
    float den = (frameNumber[ohstart_pt] - frameNumber[ohstart_pt - 1]);
    if (den > 0) {
      overhang_start = (frameNumber[ohstart_pt] - t_start) / den;
      cnt += overhang_start;
    }
  }

  if ((end_pt < numFrames) && (end_pt > 0))
  {
    // timecp->frameNumber[end_pt-1] <= t_end < timecp->frameNumber[end_pt]
    // normalize to a fraction in the spirit of "this somehow makes it worse
    float den = (frameNumber[end_pt] - frameNumber[end_pt - 1]);
    if (den > 0) {
      overhang_end = (t_end - frameNumber[end_pt - 1]) / den;
      cnt += overhang_end;
    }
  }

  if(start_pt > 0)
    overhang_start_pt = start_pt-1;
  else
    overhang_start_pt = 0;

  if(end_pt > 0 && end_pt < numFrames)
    overhang_end_pt = end_pt;
  else
    overhang_end_pt=0;

  // sum the fully-inside samples...
  dc_zero=0;
  for (pt = start_pt; pt < end_pt; pt++)
    dc_zero += (fgBufferLocal[pt]);

  // add end interpolation parts
  dc_zero += overhang_start*(fgBufferLocal[overhang_start_pt]);
  dc_zero += overhang_end *(fgBufferLocal[overhang_end_pt]);

  // make it into an average
  dc_zero /= cnt;

  return dc_zero;

  /*
  // now, subtract the dc offset from all the points
  FG_BUFFER_TYPE dc_zero_s = dc_zero;
  for (int pt = 0;pt < numFrames;pt++){   // over real data
    *fgBuffer = fgBufferLocal[pt] - dc_zero_s;
    fgBuffer += frameStride;
  }
  */
}

//shifts values from in to out. shift by determines direction and shift amount
// shiftBy < 0 shift left, shiftBy > 0 shifts right.
// Linear interpolation between the two nearest input frames; out-of-range
// frames are clamped to the first/last input frame.
__host__ __device__ inline
void ShiftUncompressedTraceBy(float * out, const float * in, const float shift, int frames)
{
  if(shift != 0){
    int shiftwhole = (int)shift; //Truncate to get number of whole frames to shift.
    int nearFrame = -shiftwhole; //determine the closer of the two frames to interpolate in-between
    int farFrame = nearFrame + ((shift < 0)?(1):(-1)); //determine the frame further away.
// interpolate between near and far
    float farFrac = abs(shift-(float)shiftwhole); //determine fraction of far frame
    float nearFrac = 1.0f - farFrac; //and fraction of near frame used for interpolation
    // cout << "nearFrame "<< nearFrame <<" nearFrac "<< nearFrac <<" farFrame " << farFrame <<" farFrac "<<farFrac <<endl;
    int lastframe = frames-1; // useful input frames range from 0 to frames-1
    for(int i=0; i<frames; i++){
      int nframe = nearFrame;
      int fframe = farFrame;
      if(nframe < 0 || fframe < 0) //handle left boundary: clamp both taps to frame 0
        nframe = fframe = 0;
      if(nframe > lastframe || fframe > lastframe) //handle right boundary, use last frame for left and right when right is out of bounds
        nframe = fframe = lastframe;
      out[i] =in[nframe]*nearFrac + in[fframe]*farFrac;
      nearFrame++;
      farFrame++;
    }
  }else{
    // zero shift: plain copy
    for(int i=0; i<frames; i++){
      out[i] = in[i];
    }
  }
}

// Three-point weighted neighbor smoothing:
//   out[i] = (in[i] + (in[i-1] + in[i+1]) * weight) / (1 + 2*weight)
// for interior points 1..n-2; the endpoints out[0] and out[n-1] are clamped
// to the first/last smoothed interior value. Supports in-place use (in == out)
// by carrying the original values in a 3-sample sliding window so already
// smoothed samples are not fed back into the average.
//
// BUG FIX: the in-place branch was wrapped in a second, redundant
// for(int i = 1; i < n-1; i++) loop whose index was shadowed by the inner
// loop's own i, so the (already smoothed) trace was re-smoothed n-2 times
// instead of once — diverging from the out-of-place branch. The duplicate
// outer loop is removed; one smoothing pass remains, matching the
// out-of-place semantics.
template <typename T>
__device__ inline
void SmoothWeightedNeighborAverage(T * out, T * in, int n, float weight)
{
  if(in == out){ //in place
    T last = in[0];
    T current = in[1];
    T next;
    for(int i = 1; i < n-1; i++)
    {
      next = in[i+1];
      T val = (current + (last + next)*weight)/(1+2*weight);
      if(i == 1) out[0] = val;   // clamp left endpoint
      out[i] = val;
      if(i == n-2)
        out[i+1] = val;          // clamp right endpoint (last iteration)
      else{
        // advance the sliding window of un-smoothed inputs
        last = current;
        current = next;
      }
    }
  }else{
    for(int i = 1; i < n-1; i++)
    {
      T val = (in[i] + (in[i-1] + in[i+1])*weight)/(1+2*weight);
      if(i == 1) out[0] = val;   // clamp left endpoint
      out[i] = val;
      if(i == n-2) out[i+1] = val; // clamp right endpoint
    }
  }
}

// Same shift-and-interpolate as ShiftUncompressedTraceBy, but optionally
// accumulates the shifted trace into accum_out (used to build empty-trace sums).
__device__ inline
void ShiftUncompressedTraceAndAccum(float * out, const float * in, const float shift, int frames, float * accum_out = NULL)
{
  if(shift != 0){
    int shiftwhole = (int)shift; //Truncate to get number of whole frames to shift.
    int nearFrame = -shiftwhole; //determine the closer of the two frames to interpolate in-between
    int farFrame = nearFrame + ((shift < 0)?(1):(-1)); //determine the frame further away.
// interpolate between near and far
    float farFrac = abs(shift-(float)shiftwhole); //determine fraction of far frame
    float nearFrac = 1.0f - farFrac; //and fraction of near frame used for interpolation
    // cout << "nearFrame "<< nearFrame <<" nearFrac "<< nearFrac <<" farFrame " << farFrame <<" farFrac "<<farFrac <<endl;
    int lastframe = frames-1; // useful input frames range from 0 to frames-1
    for(int i=0; i<frames; i++){
      int nframe = nearFrame;
      int fframe = farFrame;
      if(nframe < 0 || fframe < 0) //handle left boundary: clamp both taps to frame 0
        nframe = fframe = 0;
      if(nframe > lastframe || fframe > lastframe) //handle right boundary, use last frame for left and right when right is out of bounds
        nframe = fframe = lastframe;
      float tmp = in[nframe]*nearFrac + in[fframe]*farFrac;
      out[i] = tmp;
      if(accum_out != NULL){
        accum_out[i] += tmp; // optional running sum across traces
      }
      nearFrame++;
      farFrame++;
    }
  }else{
    // zero shift: plain copy, still accumulating if requested
    for(int i=0; i<frames; i++){
      out[i] = in[i];
      if(accum_out != NULL){
        accum_out[i] += in[i];
      }
    }
  }
}

//Uncompresses the VFC image, shifts it by t0 and re-compresses according to the
//the frames per point passed in framesPerPoint
//works on one single well in the raw.image at position l_coord
//all other passed pointers and values are specific to the well
// const symbols:
// ConstFrmP constant symbol needs to be initialized for this kernel
// NOTE(review): the following LoadImgWOffset variant is dead (commented-out)
// code kept for reference; the live replacement is
// LoadImgWOffset_OnTheFlyCompressionWithRegionalSampleExtractionInOrder below.
/*
__device__ inline int LoadImgWOffset( FG_BUFFER_TYPE * fgptr, //now points to frame 0 of current bead in image short * image, const float * frameNumber, const int * framesPerPoint, int nfrms, int frameStride, float t0Shift, bool isEmpty, float * emptyTraceSum, const float time_start, const PerFlowParamsRegion & perFlowRegP, size_t idx ) { int my_frame = 0,compFrm,curFrms,curCompFrms; float tmpAdder; //int interf,lastInterf=-1; FG_BUFFER_TYPE fgTmp[MAX_COMPRESSED_FRAMES_GPU]; float traceTmp[MAX_UNCOMPRESSED_FRAMES_GPU+4]; FG_BUFFER_TYPE lastVal; GetUncompressTrace(traceTmp+4, ConstFrmP, image, frameStride ); float * emptyAccumPtr = NULL; if(isEmpty) emptyAccumPtr = emptyTraceSum; //accumulate empty traces in this
function so we do not have to touch all the frames again ShiftUncompressedTraceAndAccum(traceTmp,traceTmp+4,-t0Shift,ConstFrmP.getUncompFrames(),emptyAccumPtr); #if UNCOMPRESSED_REZERO float toffset; if(isEmpty) toffset = MAGIC_OFFSET_FOR_EMPTY_TRACE; else toffset = perFlowRegP.getSigma(); float dcOffset = ComputeDcOffsetForUncompressedTrace(traceTmp,ConstFrmP.getUncompFrames(),time_start, perFlowRegP.getTMidNuc()-toffset); #endif ; my_frame = 0;//ConstFrmP.interpolatedFrames[startFrame]-1; compFrm = 0; tmpAdder=0.0f; curFrms=0; curCompFrms=framesPerPoint[compFrm]; while ((my_frame < ConstFrmP.getUncompFrames()) && ((compFrm < nfrms))) { tmpAdder += traceTmp[my_frame]; if(++curFrms >= curCompFrms) { tmpAdder /= curCompFrms; #if UNCOMPRESSED_REZERO tmpAdder -= dcOffset; #endif lastVal = (FG_BUFFER_TYPE)(tmpAdder); //Maybe use rintf or round to get more precision fgTmp[compFrm] = lastVal; compFrm++; curCompFrms = framesPerPoint[compFrm]; curFrms=0; tmpAdder= 0.0f; } my_frame++; } if(compFrm > 0 ) { #if !UNCOMPRESSED_REZERO //this will produce garbage or crash if frameNumber is not passed to kernel float toffset; if(isEmpty) toffset = MAGIC_OFFSET_FOR_EMPTY_TRACE; else toffset = perFlowRegP.getSigma(); float dcOffset = ComputeDcOffsetForCompressedTrace(fgTmp,1,frameNumber,time_start, perFlowRegP.getTMidNuc()-toffset, nfrms ); for(int i=0; i < nfrms ;i++){ fgptr[frameStride*i] = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset; } #else for(int i=0; i < nfrms ;i++){ fgptr[frameStride*i] = (i<compFrm)?(fgTmp[i]):(lastVal); } #endif } return ConstFrmP.getUncompFrames(); } */ /* __device__ inline int LoadImgWOffset_WithRegionalSampleExtraction( FG_BUFFER_TYPE * fgptr, //now points to frame 0 of current bead in image short * image, const float * frameNumber, const int * framesPerPoint, int nfrms, int frameStride, float t0Shift, bool isEmpty, float * emptyTraceSum, const ConstantParamsRegion * constRegP, const PerFlowParamsRegion * perFlowRegP, size_t idx, //inputs const float 
* BeadParamCube, const unsigned short * BeadStateMask, //meta data const int * SampleRowPtr, int * SampleRowCounter, //outputs unsigned short * SampleStateMask, short * SampleCompressedTraces, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions() float * SampleParamCube, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions() SampleCoordPair * SampleCoord, SampleCoordPair myLocation ) { int my_frame = 0,compFrm,curFrms,curCompFrms; float tmpAdder; float dcOffset = 0; //int interf,lastInterf=-1; FG_BUFFER_TYPE fgTmp[MAX_COMPRESSED_FRAMES_GPU]; float traceTmp[MAX_UNCOMPRESSED_FRAMES_GPU+4]; FG_BUFFER_TYPE lastVal; GetUncompressTrace(traceTmp+4, ConstFrmP, image, frameStride ); float * emptyAccumPtr = NULL; if(isEmpty) emptyAccumPtr = emptyTraceSum; //accumulate empty traces in this function so we do not have to touch all the frames again ShiftUncompressedTraceAndAccum(traceTmp,traceTmp+4,-t0Shift,ConstFrmP.getUncompFrames(),emptyAccumPtr); #if UNCOMPRESSED_REZERO float tstart = constRegP->getTimeStart(); float tend; if(isEmpty) tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE; else tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma(); dcOffset = ComputeDcOffsetForUncompressedTrace(traceTmp,ConstFrmP.getUncompFrames(),tstart, tend); #endif ; my_frame = 0;//ConstFrmP.interpolatedFrames[startFrame]-1; compFrm = 0; tmpAdder=0.0f; curFrms=0; curCompFrms=framesPerPoint[compFrm]; while ((my_frame < ConstFrmP.getUncompFrames()) && ((compFrm < nfrms))) { tmpAdder += traceTmp[my_frame]; if(++curFrms >= curCompFrms) { tmpAdder /= curCompFrms; #if UNCOMPRESSED_REZERO tmpAdder -= dcOffset; #endif lastVal = (FG_BUFFER_TYPE)(tmpAdder); //Maybe use rintf or round to get more precision fgTmp[compFrm] = lastVal; compFrm++; curCompFrms = framesPerPoint[compFrm]; curFrms=0; tmpAdder= 0.0f; } my_frame++; } if(compFrm > 0 ) { #if !UNCOMPRESSED_REZERO //this will produce garbage or crash if frameNumber is not passed to kernel float tend; if(isEmpty) tend = 
constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE; else tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma(); dcOffset = ComputeDcOffsetForCompressedTrace(fgTmp,1,frameNumber,constRegP->getTimeStart(), tend, nfrms ); //for(int i=0; i < nfrms ;i++){ //fgptr[frameStride*i] = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset; //} #endif int writeOffset = 0; int SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions(); bool isSample = false; if(Match(BeadStateMask,(BkgModelMaskType)(BkgMaskRegionalSampled|BkgMaskHighQaulity), true)){ isSample = true; //determine offset for current bead in sample set writeOffset = (*SampleRowPtr) + atomicAdd(SampleRowCounter, 1); SampleStateMask += writeOffset; SampleCompressedTraces += writeOffset; SampleParamCube += writeOffset; SampleCoord += writeOffset; // printf("tstart %f, tend %f, dcoffset %f, numframes %d, nframes %f,%f,%f,%f,%f,%f \n", constRegP->getTimeStart(), tend, dcOffset, nfrms, // frameNumber[0],frameNumber[1],frameNumber[2],frameNumber[3],frameNumber[4],frameNumber[5]); } for(int i=0; i < nfrms ;i++){ #if !UNCOMPRESSED_REZERO float val = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset; #else float val = (i<compFrm)?(fgTmp[i]):(lastVal); #endif fgptr[frameStride*i] = val; if(isSample) SampleCompressedTraces[SamplePlaneStride*i] = val; //copy current bead information to sample set } if(isSample){ //copy current bead information to sample set *SampleCoord = myLocation; *SampleStateMask = *BeadStateMask; for(int i = 0; i < Bp_NUM_PARAMS; i++) SampleParamCube[SamplePlaneStride*i] = BeadParamCube[frameStride*i]; } } return ConstFrmP.getUncompFrames(); } */ /* __device__ inline int LoadImgWOffset_WithRegionalSampleExtractionInOrder( FG_BUFFER_TYPE * fgptr, //now points to frame 0 of current bead in image short * image, const float * frameNumber, const int * framesPerPoint, int nfrms, int frameStride, float t0Shift, bool isEmpty, bool isSample, float * emptyTraceSum, const ConstantParamsRegion * constRegP, const 
PerFlowParamsRegion * perFlowRegP, size_t idx, //inputs const float * BeadParamCube, const unsigned short * BeadStateMask, //meta data //const int * SampleRowPtr, //outputs unsigned short * SampleStateMask, short * SampleCompressedTraces, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions() float * SampleParamCube, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions() SampleCoordPair * SampleCoord, SampleCoordPair myLocation ) { int my_frame = 0,compFrm,curFrms,curCompFrms; float tmpAdder; float dcOffset = 0; //int interf,lastInterf=-1; FG_BUFFER_TYPE fgTmp[MAX_COMPRESSED_FRAMES_GPU]; float traceTmp[MAX_UNCOMPRESSED_FRAMES_GPU+4]; FG_BUFFER_TYPE lastVal; GetUncompressTrace(traceTmp+4, ConstFrmP, image, frameStride ); float * emptyAccumPtr = NULL; if(isEmpty) emptyAccumPtr = emptyTraceSum; //accumulate empty traces in this function so we do not have to touch all the frames again ShiftUncompressedTraceAndAccum(traceTmp,traceTmp+4,-t0Shift,ConstFrmP.getUncompFrames(),emptyAccumPtr); #if UNCOMPRESSED_REZERO float tstart = constRegP->getTimeStart(); float tend; if(isEmpty) tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE; else tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma(); dcOffset = ComputeDcOffsetForUncompressedTrace(traceTmp,ConstFrmP.getUncompFrames(),tstart, tend); #endif ; my_frame = 0;//ConstFrmP.interpolatedFrames[startFrame]-1; compFrm = 0; tmpAdder=0.0f; curFrms=0; curCompFrms=framesPerPoint[compFrm]; while ((my_frame < ConstFrmP.getUncompFrames()) && ((compFrm < nfrms))) { tmpAdder += traceTmp[my_frame]; if(++curFrms >= curCompFrms) { tmpAdder /= curCompFrms; #if UNCOMPRESSED_REZERO tmpAdder -= dcOffset; #endif lastVal = (FG_BUFFER_TYPE)(tmpAdder); //Maybe use rintf or round to get more precision fgTmp[compFrm] = lastVal; compFrm++; curCompFrms = framesPerPoint[compFrm]; curFrms=0; tmpAdder= 0.0f; } my_frame++; } if(compFrm > 0 ) { #if !UNCOMPRESSED_REZERO //this will produce garbage or crash if frameNumber is not 
passed to kernel float tend; if(isEmpty) tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE; else tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma(); dcOffset = ComputeDcOffsetForCompressedTrace(fgTmp,1,frameNumber,constRegP->getTimeStart(), tend, nfrms ); //for(int i=0; i < nfrms ;i++){ //fgptr[frameStride*i] = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset; //} #endif int SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions(); for(int i=0; i < nfrms ;i++){ #if !UNCOMPRESSED_REZERO float val = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset; #else float val = (i<compFrm)?(fgTmp[i]):(lastVal); #endif fgptr[frameStride*i] = val; if(isSample) SampleCompressedTraces[SamplePlaneStride*i] = val; //copy current bead information to sample set } if(isSample){ //copy current bead information to sample set *SampleCoord = myLocation; *SampleStateMask = *BeadStateMask; for(int i = 0; i < Bp_NUM_PARAMS; i++) SampleParamCube[SamplePlaneStride*i] = BeadParamCube[frameStride*i]; } } return ConstFrmP.getUncompFrames(); }
*/

// Uncompresses the VFC image for one well, shifts it by t0 (with on-the-fly
// linear interpolation instead of a full uncompress buffer), re-compresses it
// according to framesPerPoint, DC-rezeros it, and — when the bead is a
// regional sample — also writes the compressed trace plus bead metadata into
// the per-region sample buffers.
// const symbols: ConstFrmP and ImgRegP must be initialized for this function.
// Returns the number of uncompressed frames handled.
__device__ inline int LoadImgWOffset_OnTheFlyCompressionWithRegionalSampleExtractionInOrder(
    FG_BUFFER_TYPE * fgptr, //now points to frame 0 of current bead in image
    short * image,
    const float * frameNumber,
    const int * framesPerPoint,
    int nfrms,
    int frameStride,
    float t0Shift,
    bool isEmpty,
    bool isSample,
    float * emptyTraceSum,
    const ConstantParamsRegion * constRegP,
    const PerFlowParamsRegion * perFlowRegP,
    size_t idx,
    //inputs
    const float * BeadParamCube,
    const unsigned short * BeadStateMask,
    //meta data
    //const int * SampleRowPtr,
    //outputs
    unsigned short * SampleStateMask,
    short * SampleCompressedTraces, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions()
    float * SampleParamCube, //SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions()
    SampleCoordPair * SampleCoord,
    SampleCoordPair myLocation
)
{
  int t0ShiftWhole;
  float t0ShiftFrac;
  int my_frame = 0,compFrm,curFrms,curCompFrms;
  double prev=0.0f;
  double next;
  double tmpAdder;
  double mult;
  int interf,lastInterf=-1;

  //allow for negative t0Shift (faster traces)
  if(t0Shift < 0-(ConstFrmP.getUncompFrames()-2))
    t0Shift = 0-(ConstFrmP.getUncompFrames()-2);
  if(t0Shift > (ConstFrmP.getUncompFrames()-2))
    t0Shift = (ConstFrmP.getUncompFrames()-2);

  //by using floor() instead of (int) here
  //we now can allow for negative t0Shifts
  t0ShiftWhole=floor(t0Shift);
  t0ShiftFrac = t0Shift - (float)t0ShiftWhole;

  // skip t0ShiftWhole input frames,
  // if T0Shift whole < 0 start at frame 0;
  int StartAtFrame = (t0ShiftWhole < 0)?(0):(t0ShiftWhole);

  float dcOffset = 0;
  //int interf,lastInterf=-1;
  FG_BUFFER_TYPE fgTmp[MAX_COMPRESSED_FRAMES_GPU];
  float traceTmp[MAX_UNCOMPRESSED_FRAMES_GPU+4];
  FG_BUFFER_TYPE lastVal = 0;

  // Empty wells still take the full uncompress+shift path so their shifted
  // trace can be accumulated into the per-region empty-trace sum.
  float * emptyAccumPtr = NULL;
  if(isEmpty){
    emptyAccumPtr = emptyTraceSum;
    GetUncompressTrace(traceTmp+4, ConstFrmP, image, frameStride );
    ShiftUncompressedTraceAndAccum(traceTmp,traceTmp+4,-t0Shift,ConstFrmP.getUncompFrames(),emptyAccumPtr);
  }

#if UNCOMPRESSED_REZERO
  // Rezero on the uncompressed trace (NOTE(review): traceTmp is only filled
  // for empty wells in this variant — confirm this path is only built/used
  // accordingly).
  float tstart = constRegP->getTimeStart();
  float tend;
  if(isEmpty)
    tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE;
  else
    tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma();
  dcOffset = ComputeDcOffsetForUncompressedTrace(traceTmp,ConstFrmP.getUncompFrames(),tstart, tend);
#endif
  ;

  // On-the-fly: walk the VFC frames, interpolate the t0 fraction between
  // consecutive acquired frames, and average groups of framesPerPoint[]
  // uncompressed frames into each compressed output frame.
  my_frame = ConstFrmP.interpolatedFrames[StartAtFrame]-1;
  compFrm = 0;
  tmpAdder=0.0f;
  curFrms=0;
  curCompFrms=framesPerPoint[compFrm];

  interf= ConstFrmP.interpolatedFrames[my_frame];
  next = image[frameStride*interf];

  while ((my_frame < ConstFrmP.getUncompFrames()) && (compFrm < nfrms))
  {
    interf= ConstFrmP.interpolatedFrames[my_frame];
    if(interf != lastInterf) //always true
    {
      prev = next;
      next = image[frameStride*interf];
    }

    // interpolate
    mult = ConstFrmP.interpolatedMult[my_frame] - (t0ShiftFrac/ConstFrmP.interpolatedDiv[my_frame]);
    tmpAdder += ( (prev)-(next) ) * (mult) + (next);

    if(++curFrms >= curCompFrms)
    {
      // finished one compressed frame: store the group average
      tmpAdder /= curCompFrms;
      fgTmp[compFrm] = (FG_BUFFER_TYPE)(tmpAdder);
      compFrm++;
      if(compFrm < ConstFrmP.getMaxCompFrames())
        curCompFrms = framesPerPoint[compFrm];
      // if(idx == 0) printf( "my_frame %d compFrm %d curCompFrms %d \n", my_frame,compFrm,curCompFrms);
      curFrms=0;
      tmpAdder = 0.0f;
    }

    //reuse my_frame while not compensated for negative t0 shifts
    //T0ShiftWhole will be < 0 for negative t0
    if(t0ShiftWhole < 0)
      t0ShiftWhole++;
    else
      my_frame++;
  }//while

  if(compFrm > 0 )
  {
#if !UNCOMPRESSED_REZERO
    //this will produce garbage or crash if frameNumber is not passed to kernel
    float tend;
    if(isEmpty)
      tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE;
    else
      tend = perFlowRegP->getTMidNuc()- perFlowRegP->getSigma();
    dcOffset = ComputeDcOffsetForCompressedTrace(fgTmp,1,frameNumber,constRegP->getTimeStart(), tend, nfrms );
    // BeadParamCube[frameStride*BpDebugDCOffset] = dcOffset; //ToDo: remove DEBUG only
    //for(int i=0; i < nfrms ;i++){
    //fgptr[frameStride*i] = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset;
    //}
#endif

    int SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions();

    // Write out compressed frames; frames past compFrm are padded with the
    // last computed value.
    for(int i=0; i < nfrms ;i++){
#if !UNCOMPRESSED_REZERO
      float val = ((i<compFrm)?(fgTmp[i]):(lastVal)) - dcOffset;
#else
      float val = (i<compFrm)?(fgTmp[i]):(lastVal);
#endif
      fgptr[frameStride*i] = val;
      if(isSample)
        SampleCompressedTraces[SamplePlaneStride*i] = val; //copy current bead information to sample set
      lastVal = val;
    }

    if(isSample){
      //copy current bead information to sample set
      *SampleCoord = myLocation;
      *SampleStateMask = *BeadStateMask;
      for(int i = 0; i < Bp_NUM_PARAMS; i++)
        SampleParamCube[SamplePlaneStride*i] = BeadParamCube[frameStride*i];
    }
  }
  return ConstFrmP.getUncompFrames();
}

//Uncompresses the VFC image, shifts it by t0 and re-compresses according to the
//the frames per point passed in compFrms
//works on one single well in the raw.image at position l_coord
//all other passed pointers and values are specific to the well
__device__ inline
void AverageRezeroTshiftAndCompressEmptyLocal(
    float * emptyTraceAvgGlobal,
    float *
emptyAvgLocal,
    int count,
    const int * framesPerPoint,
    int nfrms,
    int usedUncomFrames,
    const float time_start,
    const PerFlowParamsRegion * perFlowRegP
)
{
  // Turns the accumulated per-region empty-trace sum (emptyAvgLocal, built
  // from `count` traces) into an average, DC-rezeros it, applies the regional
  // tshift, and compresses it into emptyTraceAvgGlobal (nfrms frames).
  float tmpAdder = 0.0f;
  float lastVal = 0.0f;
  int my_frame = 0;
  int compFrm = 0;
  int curFrms=0;
  int curCompFrms=framesPerPoint[compFrm];
  float * compPtr = emptyTraceAvgGlobal;

  if(count != 0){
    if(count > 1){
      // sum -> average
      for(int f = 0; f < usedUncomFrames ; f++){
        emptyAvgLocal[f] /= count;
      }
    }
    // printf( "\n");
    float dcOffset = ComputeDcOffsetForUncompressedTrace(emptyAvgLocal, ConstFrmP.getUncompFrames() ,time_start, perFlowRegP->getTMidNuc() - MAGIC_OFFSET_FOR_EMPTY_TRACE);

    while ((my_frame < ConstFrmP.getUncompFrames()-1) && ((compFrm < nfrms)))
    {
      // apply tshift: sample the average at (my_frame - tshift), clamped
      float t= my_frame;
      float fn=t- perFlowRegP->getTshift();
      if (fn < 0.0f) fn = 0.0f;
      if (fn > (ConstFrmP.getUncompFrames()-2)) fn = ConstFrmP.getUncompFrames()-2;
      int ifn= (int)fn;
      float frac = fn - ifn;

      //rezero and shift in one step
      tmpAdder += iPc(emptyAvgLocal[ifn], emptyAvgLocal[ifn+1],frac, dcOffset);

      //and then compress
      if(++curFrms >= curCompFrms)
      {
        tmpAdder /= curCompFrms;
        lastVal = tmpAdder;
        compPtr[compFrm] = lastVal;
        compFrm++;
        curCompFrms = framesPerPoint[compFrm];
        curFrms=0;
        tmpAdder= 0.0f;
      }
      my_frame++;
    }
  }
  // pad remaining compressed frames with the last value (0.0f when count == 0)
  if(compFrm >= 0 && compFrm < nfrms)
  {
    for(;compFrm < nfrms;compFrm++){
      compPtr[compFrm] = lastVal;
    }
  }
}

// Averages the accumulated empty-trace sum over `count` traces, DC-rezeros it
// and writes it uncompressed to emptyTraceAvgGlobal; zero-fills when no empty
// traces were collected. Returns the DC offset that was subtracted.
__device__ inline
float AverageRezeroEmptyNoCompression(
    float * emptyTraceAvgGlobal,
    float * emptyAvgLocal,
    const PerFlowParamsRegion * perFlowRegP,
    const ConstantParamsRegion * constRegP,
    const int count
)
{
  if(count != 0){
    if(count > 1){
      // sum -> average
      for(int f = 0; f < ConstFrmP.getUncompFrames() ; f++){
        emptyAvgLocal[f] /= count;
      }
    }
    // printf( "\n");
    float dcOffset = ComputeDcOffsetForUncompressedTrace(emptyAvgLocal, ConstFrmP.getUncompFrames() ,constRegP->getTimeStart(), perFlowRegP->getTMidNuc() - MAGIC_OFFSET_FOR_EMPTY_TRACE);
    for(int f = 0; f < ConstFrmP.getUncompFrames() ; f++){
      emptyTraceAvgGlobal[f] = emptyAvgLocal[f] - dcOffset;
    }
    return dcOffset;
  }else{
    // no empty traces collected: publish an all-zero average
    for(int f
= 0; f < ConstFrmP.getUncompFrames() ; f++){ emptyTraceAvgGlobal[f] = 0.0f; } } return 0.0f; } // one block per region (e.g. for P1: regionX =6, regionY=6. => 36 regions) // block width has to be a warp size or a 2^k fraction of it // need one shared memory value per thread to calculate sum // kernel creates meta data itself: // number of life beads per region (and warp/thread block-row) // t0 average gets calculated on the fly // t0map not needed since t0map values directly calculated on the fly from t0est // const symbols: // ImgRegP constant symbol needs to be initialized for this kernel __global__ void GenerateT0AvgAndNumLBeads_New( unsigned short * RegionMask, const unsigned short* bfmask, const unsigned short* BeadStateMask, const float* t0Est, int * SampleRowPtr, int * NumSamples, int * lBeadsRegion, //numLbeads of whole region float * T0avg // T0Avg per REgion //ToDo check if this is really needed of if updating the T0Est would be better ) { //if(blockDim.x != warpsize) return; // block width needs to be warp size extern __shared__ int smBaseGenerateT0AvgAndNumLBeads[]; //one element per thread; int* sm_base = smBaseGenerateT0AvgAndNumLBeads; int* sm = sm_base + blockDim.x * threadIdx.y + threadIdx.x; //map sm to threads in threadblock int* sm_sample_base = sm_base + blockDim.x * blockDim.y; int* sm_sample = sm + blockDim.x * blockDim.y; //region id and base offset const size_t regId = blockIdx.y * gridDim.x + blockIdx.x; // block x & y are a grid of regions //set inital coordinates in region to work on size_t rx = threadIdx.x; //region x to work on size_t ry = threadIdx.y; //region row to work on size_t idx = ImgRegP.getWellIdx(regId,rx,ry); //window size == block width ---> should be == warpsize or warphalf const size_t windowSize = blockDim.x; //window size to slide accross row const size_t nextWorkRowStride = ImgRegP.getImgW() * blockDim.y; //stride to get to next row to work on //update pointers for first bead to work on bfmask += idx; BeadStateMask += 
idx; const float * t0EstPtr = t0Est + idx; //update region pointers to point to current region RegionMask += regId; lBeadsRegion += regId; // point to absolute num l beads for region T0avg += regId; SampleRowPtr += regId * ImgRegP.getRegH(); NumSamples += regId; int * SamplesThisRow = SampleRowPtr + ry; //determine dimensions and handle border regions const size_t regWidth = ImgRegP.getRegW(regId); const size_t regHeight = ImgRegP.getRegH(regId); // if(threadIdx.x == 0 && threadIdx.y == 0) printf ("RegId: %d dim: %dx%d, offsetsPerRow: %d\n", regId, regWidth, regHeight, offsetsPerRow); float t0Sum = 0.0f; int t0Cnt = 0; int sampleCnt = 0; *sm = 0; *RegionMask = (RegionStateMask)RegionMaskLive; // iterate over rows of region while(ry < regHeight){ size_t windowStart = 0; const unsigned short* bfmaskRow = bfmask; const unsigned short* bsMaskRow = BeadStateMask; const float* t0EstRow = t0EstPtr; *sm_sample = 0; //slide warp/window across row and create sum for of num live beads for each warp while(windowStart < regWidth){ if(rx < regWidth){ //if bead still in reagion set sm according to mask *sm += Match(bfmaskRow,(MaskType)MaskLive)?(1):(0); //add one to sm if bead is live if(!Match(bfmaskRow,(MaskType) (MaskPinned | MaskIgnore | MaskExclude))){ t0Sum += *t0EstRow; //sum up T0 for all the valid beads this thread visits t0Cnt ++; //increase t0 count to calculate average. 
} if(Match(bsMaskRow, (BkgModelMaskType)(BkgMaskRegionalSampled|BkgMaskHighQaulity),true )){ *sm_sample += 1; sampleCnt ++; } } //slide window rx += windowSize; windowStart += windowSize; bfmaskRow += windowSize; bsMaskRow += windowSize; t0EstRow += windowSize; } //row done WarpSumNoSync(sm_sample); if(threadIdx.x==0) *SamplesThisRow = *sm_sample; //store number of samples to global //move threads to first bead of next row to work on rx = threadIdx.x; ry += blockDim.y; SamplesThisRow += blockDim.y; bfmask += nextWorkRowStride; BeadStateMask += nextWorkRowStride; t0EstPtr += nextWorkRowStride; }//region done int numlBeads = ReduceSharedMemory(sm_base, sm); //thread 0 write number of live beads to global if(threadIdx.x==0 && threadIdx.y ==0){ *lBeadsRegion = numlBeads; if(numlBeads == 0) *RegionMask = RegionMaskNoLiveBeads; } //if no live beads in region -> die if(numlBeads == 0) return; __syncthreads(); //calculate t0 Sum for region float * smf_base = (float*)sm_base; float* smf = (float*)sm; *smf = t0Sum; //put t0 partial sums into shared //reduce partial sums inside each warp to one, sum only correct for threads with Idx.x == 0 in each warp t0Sum = ReduceSharedMemory(smf_base, smf); __syncthreads(); //calculate t0 cnt for region *sm = t0Cnt; //put t0 partial sums into shared //reduce partial sums inside each warp to one, sum only correct for threads with Idx.x == 0 in each warp t0Cnt = ReduceSharedMemory(sm_base, sm); unsigned short regMaskValue = *RegionMask; float t0avgRegion = t0Sum/t0Cnt; // each thread now has correct t0Avg for the region if(threadIdx.x == 0 && threadIdx.y == 0){ // printf("GPU regId %u t0_sum %f t0_cnt: %d t0 avg: %f \n" , regId, t0Sum, t0Cnt, t0avgRegion); *T0avg = t0avgRegion; //ToDo: determine what a minimum T0 average should be and what is definitely bogus if(t0avgRegion < THRESHOLD_T0_AVERAGE){ regMaskValue |= RegionMaskT0AverageBelowThreshold; if(t0avgRegion == -1) regMaskValue |= RegionMaskNoT0Average; } } //calculate t0 cnt for 
region *sm_sample = sampleCnt; //put t0 partial sums into shared //reduce partial sums inside each warp to one, sum only correct for threads with Idx.x == 0 in each warp sampleCnt = ReduceSharedMemory(sm_sample_base, sm_sample); if(threadIdx.x == 0 && threadIdx.y == 0){ // printf("GPU regId %u t0_sum %f t0_cnt: %d t0 avg: %f \n" , regId, t0Sum, t0Cnt, t0avgRegion); *NumSamples = sampleCnt; //ToDo: determine minimum number of samples needed! if(sampleCnt < THRESHOLD_NUM_REGION_SAMPLE){ regMaskValue |= RegionMaskNumRegionSamplesBelowThreshold; if(sampleCnt <= 0) regMaskValue |= RegionMaskNoRegionSamples; } //write updated region mask value back to global *RegionMask = regMaskValue; } //idx = threadIdx.x + threadIdx.y * blockDim.x; //size_t blockStart = 0; //size_t blockSize = blockDim.x * blockDim.y; //size_t windowSize = blockDim.x*blockDim.y; int offset = 0; int next = 0; __threadfence_block(); //not efficient at all, but kernel is executed only once and runs in < 1ms on a k20 for a whole block if(threadIdx.x == 0 && threadIdx.y == 0){ for( idx =0; idx < ImgRegP.getRegH(); idx++){ if(idx<ImgRegP.getRegH(regId)) next += SampleRowPtr[idx]; SampleRowPtr[idx] = offset; offset = next; } } //Todo let's see what else could be done here... 
} // closes the preceding function (its definition begins above this excerpt)

// GenerateAllBeadTraceEmptyFromMeta_k
//
// Generates t0-shifted, compressed bead traces for all live wells of a region
// and accumulates per-thread-block sums of empty-reference-well traces, which
// ReduceEmptyAverage_k later reduces into a regional empty-trace average.
//
// Launch geometry (original author's notes):
//   grid.x  = number of region columns (e.g. 6)
//   grid.y  = imgHeight / blockDim.y
//   block.x = warp size = 32 (or maybe a warp half: 16, needs to be tested)
//   block.y = TBD, probably 4 or so
// One warp (or warp fraction) works on one row of a region; the warps slide
// across the row in windows of blockDim.x wells (see windowSize below).
//
// Preconditions:
//   - __constant__ symbols ImgRegP and ConstFrmP must be initialized.
//   - dynamic shared memory: 4 bytes per thread (smemGenBeadTraces).
//   - EmptyTraceSumRegionTBlock, EmptyTraceCountRegionTBlock and
//     EmptyTraceComplete must be zero-initialized by the caller.
//
// Launch-bounds note (K20): 65536 regs per SM;
// blocks for 32 regs: 65536/(32*128) = 16.25
__global__ __launch_bounds__(128, 16)
void GenerateAllBeadTraceEmptyFromMeta_k (
    unsigned short * RegionMask,
    short * img,                       // per-well input and output
    const unsigned short * bfmask,     // per well
    const float* t0Est,                // per well
    const float * frameNumberRegion,   // from timing compression
    const int * framesPerPointRegion,
    const size_t * nptsRegion,         // per region
    const int * numlBeadsRegion,
    const float * T0avg,               // ToDo: try to already subtract T0 when calculating the average so this would not be needed here anymore!
    const ConstantParamsRegion * constRegP,
    const PerFlowParamsRegion * perFlowRegP,
    float * EmptyTraceSumRegionTBlock, // has to be initialized to 0!! will contain the sum of all empty-trace frames for each row-block in a region
    int * EmptyTraceCountRegionTBlock, // has to be initialized to 0!! will contain the number of empty traces summed up for each row-block in a region
    int * EmptyTraceComplete,          // has to be initialized to 0!! completion counter per region for final sum; ToDo: figure out if we can do without it
    // regional sample inputs
    const float * BeadParamCube,
    const unsigned short * BeadStateMask,
    // meta data
    const int *SampleRowPtr,
    int * SampleRowCounter,
    // outputs
    unsigned short * SampleStateMask,
    short * SampleCompressedTraces,
    float * SampleParamCube,
    SampleCoordPair * SampleCoord
)
{
  extern __shared__ int smemGenBeadTraces[]; // 4 bytes per thread

  // output buffer for sample traces comes from the device-side history collection
  SampleCompressedTraces = ConstHistCol.getWriteBufferSampleTraces();

  // Shared-memory pointers: per-thread slot (sm/smf) and the warp's base slot.
  // The same 4 bytes per thread are reinterpreted as int or float depending on
  // which reduction is performed.
  int * sm_base = smemGenBeadTraces;
  int * sm_warp_base = sm_base + threadIdx.y*blockDim.x;
  int * sm = sm_warp_base + threadIdx.x;
  float * smf_base = (float*)sm_base;
  //float * smf_warp_base = (float*) sm_warp_base;
  float * smf = (float*) sm;

  // Per-thread accumulator for the empty-trace frames handled by this thread.
  // ToDo: see if summing for the whole block instead of per thread would give a benefit here.
  float emptyTraceSum[MAX_UNCOMPRESSED_FRAMES_GPU] = {0};

  // determine region location
  //const size_t regionCol = blockIdx.x;
  //const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH();
  size_t ix = blockIdx.x * ImgRegP.getRegW() + threadIdx.x; // x: region column index * region width + thread idx in x
  const size_t iy = (blockIdx.y*blockDim.y) + threadIdx.y;  // y: defined by block y idx
  const size_t regId = ImgRegP.getRegId(ix,iy); // regionRow*ImgRegP.getGridDimX()+regionCol;

  RegionMask += regId;

  int numLBeads = numlBeadsRegion[regId];
  if(numLBeads == 0 || *RegionMask != RegionMaskLive) return; // nothing to do for dead regions

  // image coordinates relative to the region, and flat well index
  size_t rx = threadIdx.x;
  const size_t ry = iy%ImgRegP.getRegH();
  size_t idx = ImgRegP.getWellIdx(ix,iy);

  // per-region values
  int numFrames = nptsRegion[regId];
  float T0average = T0avg[regId];

  constRegP += regId;
  perFlowRegP += regId;
  EmptyTraceComplete += regId;

  // per-region arrays
  if(frameNumberRegion != NULL) frameNumberRegion += regId* ConstFrmP.getMaxCompFrames();
  framesPerPointRegion += regId*ConstFrmP.getMaxCompFrames();

  size_t numTBlocksRegion = (ImgRegP.getRegH()+blockDim.y-1)/blockDim.y; // row-blocks per region (ceil-div)
  size_t TBlockId = ry/blockDim.y;                                      // row-block this thread belongs to
  //EmptyTraceAvgRegion +=regId* ConstFrmP.getUncompFrames();

  // offset to the first per-row-block buffer of this region ...
  EmptyTraceCountRegionTBlock += regId * numTBlocksRegion;
  EmptyTraceSumRegionTBlock += regId*numTBlocksRegion * ConstFrmP.getUncompFrames();
  // ... and to the buffers of this particular thread block
  int * EmptyTraceCountTBlock = EmptyTraceCountRegionTBlock + TBlockId;
  float * EmptyTraceSumTBlock = EmptyTraceSumRegionTBlock + TBlockId * ConstFrmP.getUncompFrames();

  // per-well pointers
  img += idx;
  FG_BUFFER_TYPE * fgPtr = (FG_BUFFER_TYPE *)img; // output is written in place over the input image
  bfmask += idx;
  t0Est += idx;

  // actual region width (border regions may be narrower)
  size_t regWidth = ImgRegP.getRegW(regId);
  size_t warpOffset = 0;
  // slide warps across the row
  size_t windowSize = blockDim.x;

#ifdef COLLECT_SAMPLES
  // sample offsets
  BeadParamCube += idx;
  BeadStateMask += idx;
  // meta data
  SampleRowPtr += regId * (ImgRegP.getRegH()) + ry;
  SampleRowCounter += regId * ImgRegP.getRegH() + ry;
  // outputs: write offset in region buffer determined by SampleRowPtr + atomicAdd(SampleRowCounter,1);
  SampleStateMask += regId * NUM_SAMPLES_RF;
  SampleCompressedTraces += regId * NUM_SAMPLES_RF;
  SampleParamCube += regId * NUM_SAMPLES_RF;
  SampleCoord += regId * NUM_SAMPLES_RF;
#endif

  int emptyCnt = 0;        // empty-reference wells handled by this thread
  int usedUncomFrames = 0; // how many of the uncompressed frames are really used for compression?

  if(ImgRegP.isValidCoord(ix,iy) && ry < ImgRegP.getRegH(regId)) // skip work if warp outside of region
  {
#ifdef COLLECT_SAMPLES
#ifdef SAMPLES_IN_ORDER
    int SampleRowBaseOffset = *SampleRowPtr;
#endif
#endif
    // NOTE(review): SampleRowBaseOffset is declared only under
    // COLLECT_SAMPLES && SAMPLES_IN_ORDER but is referenced unconditionally
    // below, so this kernel compiles only when both macros are defined —
    // confirm the build flags.
    while(warpOffset < regWidth ){

      bool EmptyWell = false;
      int IamAlive = 0;
      *sm=0;
      // load mask for warp
      if(rx < regWidth){
        IamAlive = (Match(bfmask,(MaskType)MaskLive)) ?1:0;
        // add empty reference wells to the local live beads so we will work on them too
        EmptyWell = useForEmpty(bfmask);
      }

      *sm = IamAlive;
      WarpSumNoSync(sm); // sum up live beads in warp for local t0 average
      int numLBeadsWarp = *sm_warp_base; // also contains valid empty reference wells

      /////////////////////////////////
      // generate bead traces for live beads in the warp
      if(numLBeadsWarp > 0){ // if no live beads, shift window right away
        // if there is no live bead but an empty well in the warp, the empty will be ignored

        // t0-shift average over live wells in the warp
        // DEBUG: local average only needed to match with vectorized version
        *sm = (IamAlive)?(1):(0);
        WarpSumNoSync(sm,8); // count live beads in groups of 8 lanes
        int liveCount8 = *sm;
        float localT0 = (IamAlive)?(*t0Est - T0average):(0);
        *smf = localT0;
        WarpSumNoSync(smf,8); // sum the local t0 contributions over the same groups
        if(!EmptyWell) // do not use the local t0 average for empty traces
          localT0 = (liveCount8 > 0)?((*smf)/(float)liveCount8):(localT0);
          //localT0 = (*smf_warp_base)/ (float)numLBeadsWarp; // average over all live beads
        else
          emptyCnt++; // count empties for the empty average (moved up from after kernel execution)

        /* dead code retained from original:
#ifndef COLLECT_SAMPLES
        if(IamAlive || EmptyWell){
          usedUncomFrames = LoadImgWOffset( fgPtr, img, frameNumberRegion, framesPerPointRegion,
              numFrames, ImgRegP.getPlaneStride(), localT0, EmptyWell, emptyTraceSum,
              constRegP->getTimeStart(), *perFlowRegP, idx );
        }
#else
#ifndef SAMPLES_IN_ORDER
        if(IamAlive || EmptyWell){
          SampleCoordPair myloc(rx,ry);
          usedUncomFrames = LoadImgWOffset_WithRegionalSampleExtraction( fgPtr, img,
              frameNumberRegion, framesPerPointRegion, numFrames, ImgRegP.getPlaneStride(),
              localT0, EmptyWell, emptyTraceSum, constRegP, perFlowRegP, idx,
              BeadParamCube, BeadStateMask, SampleRowPtr, SampleRowCounter,
              SampleStateMask, SampleCompressedTraces, SampleParamCube, SampleCoord, myloc );
        }
#endif
#endif
        */

        //#ifdef COLLECT_SAMPLES
        //#ifdef SAMPLES_IN_ORDER
        //*sm = *SampleRowPtr; // init sm with the offset for the first sample in this row.
        int writeOffset = SampleRowBaseOffset;
        //int SamplePlaneStride = NUM_SAMPLES_RF * ImgRegP.getNumRegions();
        // if alive, sampled and high quality, mark as isSample
        bool isSample = (IamAlive && Match(BeadStateMask,(BkgModelMaskType)(BkgMaskRegionalSampled|BkgMaskHighQaulity), true));
        *sm = (isSample)?(1):(0);
        int sum = 0;
        int myOffset = 0;
        // serial exclusive prefix sum over the warp's shared-memory sample flags
        for(int tid=0; tid < blockDim.x; tid++){
          if (tid == threadIdx.x) myOffset = sum; // running sum before adding the local value is the write offset for the current thread in the warp
          sum += sm_warp_base[tid]; // final total is added to the row offset to get the base offset for the next window
        }
        SampleRowBaseOffset += sum; // update row offset for the sliding window
        writeOffset += myOffset;    // final write offset for the current thread

        if(IamAlive || EmptyWell){
          SampleCoordPair myloc(rx,ry);
          usedUncomFrames = LoadImgWOffset_OnTheFlyCompressionWithRegionalSampleExtractionInOrder(
              fgPtr, img, frameNumberRegion, framesPerPointRegion, numFrames,
              ImgRegP.getPlaneStride(), localT0, EmptyWell, isSample, emptyTraceSum,
              constRegP, perFlowRegP, idx,
              BeadParamCube, BeadStateMask,
              SampleStateMask + writeOffset, SampleCompressedTraces+ writeOffset,
              SampleParamCube+ writeOffset, SampleCoord+ writeOffset, myloc );
        }
        //#endif
        //#endif
      }

      ////////////////////////////////
      // move window along the row
      rx += windowSize;
      warpOffset += windowSize;
      idx += windowSize;
      bfmask += windowSize;
      t0Est += windowSize;
      img += windowSize;
      fgPtr += windowSize;
      //#ifdef COLLECT_SAMPLES
      BeadParamCube += windowSize;
      BeadStateMask += windowSize;
      //#endif
    }
  }

  // ToDo: if too slow try a different approach — trade some memory for separate
  // non-atomic stores per block (or even per row) and sum them up with a micro
  // kernel afterwards. Not super efficient but needs to be done... empty trace
  // average currently costs (numFrames*2 + 1) syncs and (numFrames + 2) atomics
  // per block.
  *sm = emptyCnt;
  //WarpSumNoSync(sm);
  // number of empty traces handled by this thread block
  int emptyInBlockCnt = ReduceSharedMemory(sm_base, sm); //*sm_warp_base;

  if(emptyInBlockCnt > 0){ // only do work if at least one empty reference well was found

    // iterate over all frames of the empty traces collected by each thread
    for(int f=0; f<usedUncomFrames;f++){ //ConstFrmP.getUncompFrames()
      __syncthreads(); // guarantee that all shared-memory operations completed before overwrite
      if(emptyCnt > 0)
        *smf = emptyTraceSum[f];
      else
        *smf = 0.0f;
      //WarpSumNoSync(sm);
      float BlockFrameSum = ReduceSharedMemory(smf_base,smf); // sum frame f over all empty traces handled by the block
      if(threadIdx.x == 0 && threadIdx.y == 0){
        EmptyTraceSumTBlock[f] = BlockFrameSum; // store the partial trace for this thread block in global memory
      }
    }
    if(threadIdx.x == 0 && threadIdx.y == 0){
      *EmptyTraceCountTBlock = emptyInBlockCnt; // store the count of empty traces to allow for the later average calculation
    }
  }
  /* dead code retained from original:
#if EMPTY_AVERAGE_IN_GENTRACE
  __threadfence(); // guarantee previous global writes are visible to all threads
  if(threadIdx.x == 0 && threadIdx.y == 0){
    size_t done = atomicAdd(EmptyTraceComplete, 1); // increment complete counter
    done++; // inc return value to represent current value
    size_t numBlocks = (ImgRegP.getRegH(regId) + blockDim.y -1)/blockDim.y;
    if(done == numBlocks){ // all blocks in region completed, so avg can be calculated
      size_t cnt = 0;
      for(size_t b = 0; b <numTBlocksRegion; b++){
        cnt += *EmptyTraceCountRegionTBlock;
        for(size_t f=0; f<usedUncomFrames;f++){
          if(b ==0) emptyTraceSum[f] = EmptyTraceSumRegionTBlock[f];
          else emptyTraceSum[f] += EmptyTraceSumRegionTBlock[f];
        }
        EmptyTraceCountRegionTBlock++;
        EmptyTraceSumRegionTBlock += ConstFrmP.getUncompFrames();
      }
#if STORE_EMPTY_UNCOMPRESSED
      float dco = AverageRezeroEmptyNoCompression(EmptyTraceAvgRegion,emptyTraceSum, perFlowRegP, constRegP, cnt );
#else
      AverageRezeroTshiftAndCompressEmptyLocal(EmptyTraceAvgRegion,emptyTraceSum, cnt,framesPerPointRegion,
          numFrames, usedUncomFrames+1, constRegP->getTimeStart(), perFlowRegP);
#endif
    }
  }
#endif
  */
}

// ReduceEmptyAverage_k
//
// One thread block per region: reduces the numTBlocksPerReg partial empty-trace
// sums and counts produced by GenerateAllBeadTraceEmptyFromMeta_k into the
// regional empty-trace average, removes the DC offset, and writes the t-shifted,
// pseudo-compressed average to the history collection write buffer.
// Each of the blockDim.y warps first produces one partial sum.
//
// Shared-memory layout: blockDim.y * ConstFrmP.getUncompFrames() floats
// (one partial trace per warp) followed by blockDim.y ints (per-warp counts).
__global__ __launch_bounds__(128, 16)
void ReduceEmptyAverage_k(
    unsigned short * RegionMask,
    const ConstantParamsRegion * constRegP,
    const PerFlowParamsRegion * perFlowRegP,
    const float * frameNumberRegion,         // from timing compression
    const int * framesPerPointRegion,
    const size_t * nptsRegion,               // per region
    const float * EmptyTraceSumRegionTBlock, // per-thread-block empty-trace frame sums from the generate kernel
    const int * EmptyTraceCountRegionTBlock, // per-thread-block empty-trace counts from the generate kernel
    const size_t numTBlocksPerReg
    //float * dcOffset_debug
)
{
  extern __shared__ float smemTracePartialSum[]; // uncompressed frames per warp

  float * smemEmptyTraceAvg = smemTracePartialSum;
  float * smemFrameInTraceWarp = smemEmptyTraceAvg + (threadIdx.y * ConstFrmP.getUncompFrames()); // first frame of this warp's trace buffer
  int * smemCount = (int*)(smemEmptyTraceAvg + (blockDim.y * ConstFrmP.getUncompFrames()));       // per-warp trace counters

  // same for all warps within block
  size_t regId = blockIdx.x + blockIdx.y * gridDim.x;

  RegionMask += regId;
  if( *RegionMask != RegionMaskLive) return;

  float * EmptyTraceAvgRegion = ConstHistCol.getWriteBufferEmptyTraceAvg() + regId * ConstFrmP.getUncompFrames();

  constRegP += regId;
  perFlowRegP += regId;
  //dcOffset_debug += regId;

  int numFrames = nptsRegion[regId];

  frameNumberRegion += regId* ConstFrmP.getMaxCompFrames();
  framesPerPointRegion += regId*ConstFrmP.getMaxCompFrames();

  // different for each warp in block
  EmptyTraceCountRegionTBlock += regId * numTBlocksPerReg + threadIdx.y;
  EmptyTraceSumRegionTBlock += (regId * numTBlocksPerReg + threadIdx.y) * ConstFrmP.getUncompFrames();

  // each warp accumulates every blockDim.y-th partial sum into its own shared trace buffer
  int count = 0;
  int psId = threadIdx.y; // partial sum id
  while(psId < numTBlocksPerReg){ // iterate over partial sums (one per thread block of the previous kernel)
    int fx = threadIdx.x; // frame within the partial sum
    count += *EmptyTraceCountRegionTBlock;
    while(fx < ConstFrmP.getUncompFrames()){ // sliding window over frames
      if(psId < blockDim.y) smemFrameInTraceWarp[fx] = 0; // first pass of this warp: initialize buffer before summing
      smemFrameInTraceWarp[fx] += EmptyTraceSumRegionTBlock[fx];
      fx += blockDim.x; // slide window
    }
    // move to next partial sum
    psId += blockDim.y;
    EmptyTraceCountRegionTBlock += blockDim.y;
    EmptyTraceSumRegionTBlock += blockDim.y * ConstFrmP.getUncompFrames();
  }
  // NOTE(review): if numTBlocksPerReg < blockDim.y, the trace buffers of the
  // idle warps are never zero-initialized above yet are still read in the
  // reduction below — confirm launches always satisfy
  // numTBlocksPerReg >= blockDim.y.

  if(threadIdx.x == 0){
    smemCount[threadIdx.y] = count; // thread 0 of each warp stores its trace count to shared memory
  }
  __syncthreads();

  // Each of the blockDim.y warps now has a partial sum of the regional empty
  // traces in shared memory; the number of traces per sum is in smemCount.
  int idx = threadIdx.x + blockDim.x * threadIdx.y;
  int blockSize = blockDim.x * blockDim.y;
  count = 0;
  for(int i= 0 ; i<blockDim.y; i++)
    count += smemCount[i];
  while(idx < ConstFrmP.getUncompFrames()){
    float sum = 0;
    for(int i= 0 ; i<blockDim.y; i++){
      sum += smemEmptyTraceAvg[ ConstFrmP.getUncompFrames() * i + idx];
    }
    smemEmptyTraceAvg[idx] = (count > 0)?(sum/count):(0); // average frame idx in place
    //EmptyTraceAvgRegion[idx] = smemEmptyTraceAvg[idx];
    idx += blockSize;
  }
  __syncthreads();

  // flag regions with too few empty reference wells
  if(count < THRESHOLD_NUM_EMPTIES){
    unsigned short regMaskValue = *RegionMask;
    regMaskValue |= RegionMaskNumEmptiesBelowThreshold;
    if(count <= 0) regMaskValue |= RegionMaskNoEmpties;
    if(threadIdx.x == 0 && threadIdx.y == 0)
      *RegionMask = regMaskValue;
  }

  // Calculation of the uncompressed average empty trace per region completed.
  //AverageRezeroTshiftAndCompressEmptyLocal(EmptyTraceAvgRegion,smemEmptyTraceAvg, count,framesPerPointRegion, numFrames, ConstFrmP.getUncompFrames(), constRegP->getTimeStart(), perFlowRegP);

  // dc offset correction over the [tstart, tend] window
  float tstart = constRegP->getTimeStart();
  float tend = constRegP->getT0Frame() - MAGIC_OFFSET_FOR_EMPTY_TRACE;
  float dcOffset = ComputeDcOffsetForUncompressedTrace(smemEmptyTraceAvg, ConstFrmP.getUncompFrames() ,tstart,tend);
  //float dcOffset = 0;

  // t-shift and pseudo-compress the averaged trace into the regional output buffer
  idx = threadIdx.x + blockDim.x * threadIdx.y;
  //if(idx == 0) *dcOffset_debug = dcOffset;
  while(idx < numFrames){
    TShiftAndPseudoCompressionOneFrame ( EmptyTraceAvgRegion , smemEmptyTraceAvg, frameNumberRegion, perFlowRegP->getTshift(), idx, dcOffset);
    idx += blockSize;
  }
}
the_stack
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include "labeling_algorithms.h"
#include "register.h"

// Connected-component labeling after Stava and Benes: per-tile union-find in
// shared memory, then recursive merging of tile borders in global memory.
#define TILE_SIZE 16        // side of the square tile processed by one block in LocalMerge
#define TILE_GRID_SIZE 4    // tiles per block side merged in each GlobalMerge pass
#define THREAD_PER_TILE 16  // threads (z-dimension) cooperating on one tile border

using namespace cv;

namespace {

    // Returns the root index of the union-find tree containing n.
    // Labels are stored 1-based (0 = background), indices are 0-based.
    __device__ unsigned Find(const int* s_buf, unsigned n) {
        // Warning: do not call Find on a background pixel
        unsigned label = s_buf[n];
        assert(label > 0);
        while (label - 1 != n) {
            n = label - 1;
            label = s_buf[n];
            assert(label > 0);
        }
        return n;
    }

    // Merges the trees containing index_a and index_b (no-op if either is
    // background); the smaller root wins via atomicMin. Sets *changed when the
    // two pixels belonged to different trees, so callers know to iterate again.
    __device__ void Union(int* s_buf, unsigned index_a, unsigned index_b, char* changed) {
        unsigned a = s_buf[index_a];
        if (!a) return;
        unsigned b = s_buf[index_b];
        if (!b) return;
        --a;
        --b;
        a = Find(s_buf, a);
        b = Find(s_buf, b);

        if (a != b) {
            *changed = 1;
        }
        if (a < b) {
            atomicMin(s_buf + b, a + 1);
        }
        else if (b < a) {
            atomicMin(s_buf + a, b + 1);
        }
    }

    // Perform local CCL on TILE_SIZE x TILE_SIZE tiles.
    // Expects block = (TILE_SIZE, TILE_SIZE), one block per tile; iterates
    // label propagation + path compression in shared memory until stable, then
    // converts tile-local labels to global 1-based labels.
    __global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {

        const unsigned r = threadIdx.y;
        const unsigned c = threadIdx.x;
        const unsigned local_index = r * blockDim.x + c;
        const unsigned global_row = blockIdx.y * blockDim.y + r;
        const unsigned global_col = blockIdx.x * blockDim.x + c;
        const unsigned img_index = global_row * img.step + global_col;

        __shared__ int s_buf[TILE_SIZE * TILE_SIZE];
        __shared__ unsigned char s_img[TILE_SIZE * TILE_SIZE];
        __shared__ char changed[1];

        bool in_limits = (global_row < img.rows&& global_col < img.cols);

        s_img[local_index] = in_limits ? img[img_index] : 0; // out-of-image threads act as background
        unsigned char v = s_img[local_index];

        int label = v ? local_index + 1 : 0; // initial label: own 1-based tile index

        __syncthreads();

        while (1) {

            // Pass 1 of the CCL algorithm
            s_buf[local_index] = label;

            if (threadIdx.x == 0 && threadIdx.y == 0) {
                changed[0] = 0;
            }

            int new_label = label;

            __syncthreads();

            // Find the minimal label among the 8-connected neighbors
            if (label) {

                if (r > 0 && c > 0 && s_img[local_index - TILE_SIZE - 1]) {
                    new_label = min(new_label, s_buf[local_index - TILE_SIZE - 1]);
                }
                if (r > 0 && s_img[local_index - TILE_SIZE]) {
                    new_label = min(new_label, s_buf[local_index - TILE_SIZE]);
                }
                if (r > 0 && c < TILE_SIZE - 1 && s_img[local_index - TILE_SIZE + 1]) {
                    new_label = min(new_label, s_buf[local_index - TILE_SIZE + 1]);
                }
                if (c > 0 && s_img[local_index - 1]) {
                    new_label = min(new_label, s_buf[local_index - 1]);
                }
                if (c < TILE_SIZE - 1 && s_img[local_index + 1]) {
                    new_label = min(new_label, s_buf[local_index + 1]);
                }
                if (r < TILE_SIZE - 1 && c > 0 && s_img[local_index + TILE_SIZE - 1]) {
                    new_label = min(new_label, s_buf[local_index + TILE_SIZE - 1]);
                }
                if (r < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE]) {
                    new_label = min(new_label, s_buf[local_index + TILE_SIZE]);
                }
                if (r < TILE_SIZE - 1 && c < TILE_SIZE - 1 && s_img[local_index + TILE_SIZE + 1]) {
                    new_label = min(new_label, s_buf[local_index + TILE_SIZE + 1]);
                }
            }

            __syncthreads();

            // If the new label is smaller than the old one, merge the equivalence trees
            if (new_label < label) {
                atomicMin(s_buf + label - 1, new_label);
                changed[0] = 1; // benign race: many threads may set the same byte flag
            }

            __syncthreads();

            if (changed[0] == 0)
                break;

            if (label) {
                // Pass 2 of the CCL algorithm: path compression
                label = Find(s_buf, label - 1) + 1;
            }

            __syncthreads();
        }

        if (in_limits) {
            // Store the result to device memory, translating the tile-local
            // label into a global flat index + 1
            int global_label = 0;
            if (v) {
                unsigned f_row = (label - 1) / TILE_SIZE;
                unsigned f_col = (label - 1) % TILE_SIZE;
                global_label = (blockIdx.y * TILE_SIZE + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * TILE_SIZE + f_col) + 1;
            }
            labels.data[global_row * labels.step / sizeof(int) + global_col] = global_label;
        }
    }

    // Merges union-find trees across the borders of adjacent sub-blocks of
    // side subBlockDim. Block = (TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);
    // the z-dimension threads cooperate on one border line. Iterates until no
    // Union call reports a change.
    __global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels, uint32_t subBlockDim) {

        // Coordinates of the top-left pixel of the current block of tiles
        unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
        unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;

        // Coordinates of the top-left pixel of the current tile
        unsigned tile_row = block_row + threadIdx.y * subBlockDim;
        unsigned tile_col = block_col + threadIdx.x * subBlockDim;

        // passes needed for blockDim.z threads to cover subBlockDim border pixels
        unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;

        __shared__ char changed[1];

        while (1) {

            if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
                changed[0] = 0;
            }

            __syncthreads();

            // Process the bottom horizontal border (union with the 3 pixels below)
            for (unsigned i = 0; i < repetitions; i++) {
                unsigned r = tile_row + subBlockDim - 1;
                unsigned c = tile_col + i * blockDim.z + threadIdx.z;
                if (threadIdx.y < blockDim.y - 1 && r < img.rows - 1 && c < img.cols && c < tile_col + subBlockDim) {
                    if (c > block_col) {
                        Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c - 1, changed);
                    }
                    Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c, changed);
                    if (c < img.cols - 1 && c < block_col + blockDim.x * subBlockDim - 1) {
                        Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
                    }
                }
            }

            // Process the right vertical border (union with the 3 pixels to the right)
            for (unsigned i = 0; i < repetitions; i++) {
                unsigned c = tile_col + subBlockDim - 1;
                unsigned r = tile_row + i * blockDim.z + threadIdx.z;
                if (threadIdx.x < blockDim.x - 1 && c < img.cols - 1 && r < img.rows && r < tile_row + subBlockDim) {
                    if (r > block_row) {
                        Union(labels.data, r * labels.step / sizeof(int) + c, (r - 1) * labels.step / sizeof(int) + c + 1, changed);
                    }
                    Union(labels.data, r * labels.step / sizeof(int) + c, r * labels.step / sizeof(int) + c + 1, changed);
                    if (r < img.rows - 1 && r < block_row + blockDim.y * subBlockDim - 1) {
                        Union(labels.data, r * labels.step / sizeof(int) + c, (r + 1) * labels.step / sizeof(int) + c + 1, changed);
                    }
                }
            }

            __syncthreads();

            if (changed[0] == 0) {
                break; // the tiles are merged
            }

            __syncthreads();
        }
    }

    // Flattens (path-compresses) the labels along the sub-block borders that
    // GlobalMerge just merged, so the next merge level sees short trees.
    // Same launch geometry as GlobalMerge.
    __global__ void BorderCompression(cuda::PtrStepSzi labels, uint32_t subBlockDim) {

        // Coordinates of the top-left pixel of the current block of tiles
        const unsigned block_row = blockIdx.y * blockDim.y * subBlockDim;
        const unsigned block_col = blockIdx.x * blockDim.x * subBlockDim;

        // Coordinates of the top-left pixel of the current tile
        const unsigned tile_row = block_row + threadIdx.y * subBlockDim;
        const unsigned tile_col = block_col + threadIdx.x * subBlockDim;

        const unsigned repetitions = (subBlockDim + blockDim.z - 1) / blockDim.z;

        // Process the bottom horizontal border
        for (unsigned i = 0; i < repetitions; i++) {
            const unsigned r = tile_row + subBlockDim - 1;
            const unsigned c = tile_col + i * blockDim.z + threadIdx.z;
            if (threadIdx.y < blockDim.y - 1 && r < labels.rows - 1 && c < labels.cols && c < tile_col + subBlockDim) {
                int label = labels[r * labels.step / sizeof(int) + c];
                if (label) {
                    labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
                }
            }
        }

        // Process the right vertical border
        for (unsigned i = 0; i < repetitions; i++) {
            const unsigned c = tile_col + subBlockDim - 1;
            const unsigned r = tile_row + i * blockDim.z + threadIdx.z;
            if (threadIdx.x < blockDim.x - 1 && c < labels.cols - 1 && r < labels.rows && r < tile_row + subBlockDim) {
                int label = labels[r * labels.step / sizeof(int) + c];
                if (label) {
                    labels[r * labels.step / sizeof(int) + c] = Find(labels, label - 1) + 1;
                }
            }
        }
    }

    // Final pass: every foreground pixel is relabeled with the root of its
    // union-find tree (+1), producing the final label image.
    __global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {

        unsigned global_row = blockIdx.y * blockDim.y + threadIdx.y;
        unsigned global_col = blockIdx.x * blockDim.x + threadIdx.x;
        unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;

        if (global_row < labels.rows && global_col < labels.cols) {
            unsigned char val = img[global_row * img.step + global_col];
            if (val) {
                labels[labels_index] = Find(labels.data, labels_index) + 1;
            }
        }
    }

}

// 8-connectivity CCL driver implementing the Stava-Benes block-merging scheme:
// local per-tile labeling, log-recursive border merging, final path compression.
class STAVA : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
    dim3 grid_size_;   // grid for the per-pixel kernels (LocalMerge/PathCompression)
    dim3 block_size_;  // TILE_SIZE x TILE_SIZE block for the per-pixel kernels

public:
    STAVA() {}

    void PerformLabeling() {

        d_img_labels_.create(d_img_.size(), CV_32SC1);

        grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
        block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);

        // Phase 1
        // Label pixels locally to a tile
        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);

        // Phase 2
        // Merge together Union-Find trees from different tiles, in a recursive manner:
        // each pass merges TILE_GRID_SIZE x TILE_GRID_SIZE sub-blocks into one
        uint32_t max_img_dim = max(img_.rows, img_.cols);
        uint32_t sub_block_dim = TILE_SIZE;
        uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
        dim3 grid_size_merge;
        dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);

        while (sub_block_dim < max_img_dim) {
            grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
            GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
            BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
            sub_block_dim = block_pixels;
            block_pixels *= TILE_GRID_SIZE;
        }

        // Phase 3
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);

        cudaDeviceSynchronize();
    }

private:
    // Allocate the label image; returns the time spent.
    double Alloc() {
        perf_.start();
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        perf_.stop();
        return perf_.last();
    }

    // Nothing to free explicitly (GpuMat is RAII); timed for symmetry.
    double Dealloc() {
        perf_.start();
        perf_.stop();
        return perf_.last();
    }

    double MemoryTransferHostToDevice() {
        perf_.start();
        d_img_.upload(img_);
        perf_.stop();
        return perf_.last();
    }

    void MemoryTransferDeviceToHost() {
        d_img_labels_.download(img_labels_);
    }

    // Phase 1 only (per-tile labeling), used by the stepped benchmark.
    void LocalScan() {
        grid_size_ = dim3((d_img_.cols + TILE_SIZE - 1) / TILE_SIZE, (d_img_.rows + TILE_SIZE - 1) / TILE_SIZE, 1);
        block_size_ = dim3(TILE_SIZE, TILE_SIZE, 1);

        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        cudaDeviceSynchronize();
    }

    // Phases 2 and 3 (recursive merge + final compression), used by the stepped benchmark.
    void GlobalScan() {
        uint32_t max_img_dim = max(img_.rows, img_.cols);
        uint32_t sub_block_dim = TILE_SIZE;
        uint32_t block_pixels = sub_block_dim * TILE_GRID_SIZE;
        dim3 grid_size_merge;
        dim3 block_size_merge = dim3(TILE_GRID_SIZE, TILE_GRID_SIZE, THREAD_PER_TILE);

        while (sub_block_dim < max_img_dim) {
            grid_size_merge = dim3((d_img_.cols + block_pixels - 1) / block_pixels, (d_img_.rows + block_pixels - 1) / block_pixels, 1);
            GlobalMerge << <grid_size_merge, block_size_merge >> > (d_img_, d_img_labels_, sub_block_dim);
            BorderCompression << <grid_size_merge, block_size_merge >> > (d_img_labels_, sub_block_dim);
            sub_block_dim = block_pixels;
            block_pixels *= TILE_GRID_SIZE;
        }

        // Phase 3
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);

        cudaDeviceSynchronize();
    }

public:
    void PerformLabelingWithSteps()
    {
        double alloc_timing = Alloc();

        perf_.start();
        LocalScan();
        perf_.stop();
        perf_.store(Step(StepType::FIRST_SCAN), perf_.last());

        perf_.start();
        GlobalScan();
        perf_.stop();
        perf_.store(Step(StepType::SECOND_SCAN), perf_.last());

        double dealloc_timing = Dealloc();
        perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
    }

};

REGISTER_LABELING(STAVA);
the_stack
__constant__ unsigned char c_E8_bitslice_roundconstant[42][32] = { { 0x72, 0xd5, 0xde, 0xa2, 0xdf, 0x15, 0xf8, 0x67, 0x7b, 0x84, 0x15, 0xa, 0xb7, 0x23, 0x15, 0x57, 0x81, 0xab, 0xd6, 0x90, 0x4d, 0x5a, 0x87, 0xf6, 0x4e, 0x9f, 0x4f, 0xc5, 0xc3, 0xd1, 0x2b, 0x40 }, { 0xea, 0x98, 0x3a, 0xe0, 0x5c, 0x45, 0xfa, 0x9c, 0x3, 0xc5, 0xd2, 0x99, 0x66, 0xb2, 0x99, 0x9a, 0x66, 0x2, 0x96, 0xb4, 0xf2, 0xbb, 0x53, 0x8a, 0xb5, 0x56, 0x14, 0x1a, 0x88, 0xdb, 0xa2, 0x31 }, { 0x3, 0xa3, 0x5a, 0x5c, 0x9a, 0x19, 0xe, 0xdb, 0x40, 0x3f, 0xb2, 0xa, 0x87, 0xc1, 0x44, 0x10, 0x1c, 0x5, 0x19, 0x80, 0x84, 0x9e, 0x95, 0x1d, 0x6f, 0x33, 0xeb, 0xad, 0x5e, 0xe7, 0xcd, 0xdc }, { 0x10, 0xba, 0x13, 0x92, 0x2, 0xbf, 0x6b, 0x41, 0xdc, 0x78, 0x65, 0x15, 0xf7, 0xbb, 0x27, 0xd0, 0xa, 0x2c, 0x81, 0x39, 0x37, 0xaa, 0x78, 0x50, 0x3f, 0x1a, 0xbf, 0xd2, 0x41, 0x0, 0x91, 0xd3 }, { 0x42, 0x2d, 0x5a, 0xd, 0xf6, 0xcc, 0x7e, 0x90, 0xdd, 0x62, 0x9f, 0x9c, 0x92, 0xc0, 0x97, 0xce, 0x18, 0x5c, 0xa7, 0xb, 0xc7, 0x2b, 0x44, 0xac, 0xd1, 0xdf, 0x65, 0xd6, 0x63, 0xc6, 0xfc, 0x23 }, { 0x97, 0x6e, 0x6c, 0x3, 0x9e, 0xe0, 0xb8, 0x1a, 0x21, 0x5, 0x45, 0x7e, 0x44, 0x6c, 0xec, 0xa8, 0xee, 0xf1, 0x3, 0xbb, 0x5d, 0x8e, 0x61, 0xfa, 0xfd, 0x96, 0x97, 0xb2, 0x94, 0x83, 0x81, 0x97 }, { 0x4a, 0x8e, 0x85, 0x37, 0xdb, 0x3, 0x30, 0x2f, 0x2a, 0x67, 0x8d, 0x2d, 0xfb, 0x9f, 0x6a, 0x95, 0x8a, 0xfe, 0x73, 0x81, 0xf8, 0xb8, 0x69, 0x6c, 0x8a, 0xc7, 0x72, 0x46, 0xc0, 0x7f, 0x42, 0x14 }, { 0xc5, 0xf4, 0x15, 0x8f, 0xbd, 0xc7, 0x5e, 0xc4, 0x75, 0x44, 0x6f, 0xa7, 0x8f, 0x11, 0xbb, 0x80, 0x52, 0xde, 0x75, 0xb7, 0xae, 0xe4, 0x88, 0xbc, 0x82, 0xb8, 0x0, 0x1e, 0x98, 0xa6, 0xa3, 0xf4 }, { 0x8e, 0xf4, 0x8f, 0x33, 0xa9, 0xa3, 0x63, 0x15, 0xaa, 0x5f, 0x56, 0x24, 0xd5, 0xb7, 0xf9, 0x89, 0xb6, 0xf1, 0xed, 0x20, 0x7c, 0x5a, 0xe0, 0xfd, 0x36, 0xca, 0xe9, 0x5a, 0x6, 0x42, 0x2c, 0x36 }, { 0xce, 0x29, 0x35, 0x43, 0x4e, 0xfe, 0x98, 0x3d, 0x53, 0x3a, 0xf9, 0x74, 0x73, 0x9a, 0x4b, 0xa7, 0xd0, 0xf5, 0x1f, 0x59, 0x6f, 0x4e, 0x81, 0x86, 0xe, 0x9d, 0xad, 0x81, 0xaf, 0xd8, 0x5a, 
0x9f }, { 0xa7, 0x5, 0x6, 0x67, 0xee, 0x34, 0x62, 0x6a, 0x8b, 0xb, 0x28, 0xbe, 0x6e, 0xb9, 0x17, 0x27, 0x47, 0x74, 0x7, 0x26, 0xc6, 0x80, 0x10, 0x3f, 0xe0, 0xa0, 0x7e, 0x6f, 0xc6, 0x7e, 0x48, 0x7b }, { 0xd, 0x55, 0xa, 0xa5, 0x4a, 0xf8, 0xa4, 0xc0, 0x91, 0xe3, 0xe7, 0x9f, 0x97, 0x8e, 0xf1, 0x9e, 0x86, 0x76, 0x72, 0x81, 0x50, 0x60, 0x8d, 0xd4, 0x7e, 0x9e, 0x5a, 0x41, 0xf3, 0xe5, 0xb0, 0x62 }, { 0xfc, 0x9f, 0x1f, 0xec, 0x40, 0x54, 0x20, 0x7a, 0xe3, 0xe4, 0x1a, 0x0, 0xce, 0xf4, 0xc9, 0x84, 0x4f, 0xd7, 0x94, 0xf5, 0x9d, 0xfa, 0x95, 0xd8, 0x55, 0x2e, 0x7e, 0x11, 0x24, 0xc3, 0x54, 0xa5 }, { 0x5b, 0xdf, 0x72, 0x28, 0xbd, 0xfe, 0x6e, 0x28, 0x78, 0xf5, 0x7f, 0xe2, 0xf, 0xa5, 0xc4, 0xb2, 0x5, 0x89, 0x7c, 0xef, 0xee, 0x49, 0xd3, 0x2e, 0x44, 0x7e, 0x93, 0x85, 0xeb, 0x28, 0x59, 0x7f }, { 0x70, 0x5f, 0x69, 0x37, 0xb3, 0x24, 0x31, 0x4a, 0x5e, 0x86, 0x28, 0xf1, 0x1d, 0xd6, 0xe4, 0x65, 0xc7, 0x1b, 0x77, 0x4, 0x51, 0xb9, 0x20, 0xe7, 0x74, 0xfe, 0x43, 0xe8, 0x23, 0xd4, 0x87, 0x8a }, { 0x7d, 0x29, 0xe8, 0xa3, 0x92, 0x76, 0x94, 0xf2, 0xdd, 0xcb, 0x7a, 0x9, 0x9b, 0x30, 0xd9, 0xc1, 0x1d, 0x1b, 0x30, 0xfb, 0x5b, 0xdc, 0x1b, 0xe0, 0xda, 0x24, 0x49, 0x4f, 0xf2, 0x9c, 0x82, 0xbf }, { 0xa4, 0xe7, 0xba, 0x31, 0xb4, 0x70, 0xbf, 0xff, 0xd, 0x32, 0x44, 0x5, 0xde, 0xf8, 0xbc, 0x48, 0x3b, 0xae, 0xfc, 0x32, 0x53, 0xbb, 0xd3, 0x39, 0x45, 0x9f, 0xc3, 0xc1, 0xe0, 0x29, 0x8b, 0xa0 }, { 0xe5, 0xc9, 0x5, 0xfd, 0xf7, 0xae, 0x9, 0xf, 0x94, 0x70, 0x34, 0x12, 0x42, 0x90, 0xf1, 0x34, 0xa2, 0x71, 0xb7, 0x1, 0xe3, 0x44, 0xed, 0x95, 0xe9, 0x3b, 0x8e, 0x36, 0x4f, 0x2f, 0x98, 0x4a }, { 0x88, 0x40, 0x1d, 0x63, 0xa0, 0x6c, 0xf6, 0x15, 0x47, 0xc1, 0x44, 0x4b, 0x87, 0x52, 0xaf, 0xff, 0x7e, 0xbb, 0x4a, 0xf1, 0xe2, 0xa, 0xc6, 0x30, 0x46, 0x70, 0xb6, 0xc5, 0xcc, 0x6e, 0x8c, 0xe6 }, { 0xa4, 0xd5, 0xa4, 0x56, 0xbd, 0x4f, 0xca, 0x0, 0xda, 0x9d, 0x84, 0x4b, 0xc8, 0x3e, 0x18, 0xae, 0x73, 0x57, 0xce, 0x45, 0x30, 0x64, 0xd1, 0xad, 0xe8, 0xa6, 0xce, 0x68, 0x14, 0x5c, 0x25, 0x67 }, { 0xa3, 0xda, 0x8c, 0xf2, 0xcb, 0xe, 0xe1, 0x16, 
0x33, 0xe9, 0x6, 0x58, 0x9a, 0x94, 0x99, 0x9a, 0x1f, 0x60, 0xb2, 0x20, 0xc2, 0x6f, 0x84, 0x7b, 0xd1, 0xce, 0xac, 0x7f, 0xa0, 0xd1, 0x85, 0x18 }, { 0x32, 0x59, 0x5b, 0xa1, 0x8d, 0xdd, 0x19, 0xd3, 0x50, 0x9a, 0x1c, 0xc0, 0xaa, 0xa5, 0xb4, 0x46, 0x9f, 0x3d, 0x63, 0x67, 0xe4, 0x4, 0x6b, 0xba, 0xf6, 0xca, 0x19, 0xab, 0xb, 0x56, 0xee, 0x7e }, { 0x1f, 0xb1, 0x79, 0xea, 0xa9, 0x28, 0x21, 0x74, 0xe9, 0xbd, 0xf7, 0x35, 0x3b, 0x36, 0x51, 0xee, 0x1d, 0x57, 0xac, 0x5a, 0x75, 0x50, 0xd3, 0x76, 0x3a, 0x46, 0xc2, 0xfe, 0xa3, 0x7d, 0x70, 0x1 }, { 0xf7, 0x35, 0xc1, 0xaf, 0x98, 0xa4, 0xd8, 0x42, 0x78, 0xed, 0xec, 0x20, 0x9e, 0x6b, 0x67, 0x79, 0x41, 0x83, 0x63, 0x15, 0xea, 0x3a, 0xdb, 0xa8, 0xfa, 0xc3, 0x3b, 0x4d, 0x32, 0x83, 0x2c, 0x83 }, { 0xa7, 0x40, 0x3b, 0x1f, 0x1c, 0x27, 0x47, 0xf3, 0x59, 0x40, 0xf0, 0x34, 0xb7, 0x2d, 0x76, 0x9a, 0xe7, 0x3e, 0x4e, 0x6c, 0xd2, 0x21, 0x4f, 0xfd, 0xb8, 0xfd, 0x8d, 0x39, 0xdc, 0x57, 0x59, 0xef }, { 0x8d, 0x9b, 0xc, 0x49, 0x2b, 0x49, 0xeb, 0xda, 0x5b, 0xa2, 0xd7, 0x49, 0x68, 0xf3, 0x70, 0xd, 0x7d, 0x3b, 0xae, 0xd0, 0x7a, 0x8d, 0x55, 0x84, 0xf5, 0xa5, 0xe9, 0xf0, 0xe4, 0xf8, 0x8e, 0x65 }, { 0xa0, 0xb8, 0xa2, 0xf4, 0x36, 0x10, 0x3b, 0x53, 0xc, 0xa8, 0x7, 0x9e, 0x75, 0x3e, 0xec, 0x5a, 0x91, 0x68, 0x94, 0x92, 0x56, 0xe8, 0x88, 0x4f, 0x5b, 0xb0, 0x5c, 0x55, 0xf8, 0xba, 0xbc, 0x4c }, { 0xe3, 0xbb, 0x3b, 0x99, 0xf3, 0x87, 0x94, 0x7b, 0x75, 0xda, 0xf4, 0xd6, 0x72, 0x6b, 0x1c, 0x5d, 0x64, 0xae, 0xac, 0x28, 0xdc, 0x34, 0xb3, 0x6d, 0x6c, 0x34, 0xa5, 0x50, 0xb8, 0x28, 0xdb, 0x71 }, { 0xf8, 0x61, 0xe2, 0xf2, 0x10, 0x8d, 0x51, 0x2a, 0xe3, 0xdb, 0x64, 0x33, 0x59, 0xdd, 0x75, 0xfc, 0x1c, 0xac, 0xbc, 0xf1, 0x43, 0xce, 0x3f, 0xa2, 0x67, 0xbb, 0xd1, 0x3c, 0x2, 0xe8, 0x43, 0xb0 }, { 0x33, 0xa, 0x5b, 0xca, 0x88, 0x29, 0xa1, 0x75, 0x7f, 0x34, 0x19, 0x4d, 0xb4, 0x16, 0x53, 0x5c, 0x92, 0x3b, 0x94, 0xc3, 0xe, 0x79, 0x4d, 0x1e, 0x79, 0x74, 0x75, 0xd7, 0xb6, 0xee, 0xaf, 0x3f }, { 0xea, 0xa8, 0xd4, 0xf7, 0xbe, 0x1a, 0x39, 0x21, 0x5c, 0xf4, 0x7e, 0x9, 0x4c, 0x23, 0x27, 0x51, 
0x26, 0xa3, 0x24, 0x53, 0xba, 0x32, 0x3c, 0xd2, 0x44, 0xa3, 0x17, 0x4a, 0x6d, 0xa6, 0xd5, 0xad }, { 0xb5, 0x1d, 0x3e, 0xa6, 0xaf, 0xf2, 0xc9, 0x8, 0x83, 0x59, 0x3d, 0x98, 0x91, 0x6b, 0x3c, 0x56, 0x4c, 0xf8, 0x7c, 0xa1, 0x72, 0x86, 0x60, 0x4d, 0x46, 0xe2, 0x3e, 0xcc, 0x8, 0x6e, 0xc7, 0xf6 }, { 0x2f, 0x98, 0x33, 0xb3, 0xb1, 0xbc, 0x76, 0x5e, 0x2b, 0xd6, 0x66, 0xa5, 0xef, 0xc4, 0xe6, 0x2a, 0x6, 0xf4, 0xb6, 0xe8, 0xbe, 0xc1, 0xd4, 0x36, 0x74, 0xee, 0x82, 0x15, 0xbc, 0xef, 0x21, 0x63 }, { 0xfd, 0xc1, 0x4e, 0xd, 0xf4, 0x53, 0xc9, 0x69, 0xa7, 0x7d, 0x5a, 0xc4, 0x6, 0x58, 0x58, 0x26, 0x7e, 0xc1, 0x14, 0x16, 0x6, 0xe0, 0xfa, 0x16, 0x7e, 0x90, 0xaf, 0x3d, 0x28, 0x63, 0x9d, 0x3f }, { 0xd2, 0xc9, 0xf2, 0xe3, 0x0, 0x9b, 0xd2, 0xc, 0x5f, 0xaa, 0xce, 0x30, 0xb7, 0xd4, 0xc, 0x30, 0x74, 0x2a, 0x51, 0x16, 0xf2, 0xe0, 0x32, 0x98, 0xd, 0xeb, 0x30, 0xd8, 0xe3, 0xce, 0xf8, 0x9a }, { 0x4b, 0xc5, 0x9e, 0x7b, 0xb5, 0xf1, 0x79, 0x92, 0xff, 0x51, 0xe6, 0x6e, 0x4, 0x86, 0x68, 0xd3, 0x9b, 0x23, 0x4d, 0x57, 0xe6, 0x96, 0x67, 0x31, 0xcc, 0xe6, 0xa6, 0xf3, 0x17, 0xa, 0x75, 0x5 }, { 0xb1, 0x76, 0x81, 0xd9, 0x13, 0x32, 0x6c, 0xce, 0x3c, 0x17, 0x52, 0x84, 0xf8, 0x5, 0xa2, 0x62, 0xf4, 0x2b, 0xcb, 0xb3, 0x78, 0x47, 0x15, 0x47, 0xff, 0x46, 0x54, 0x82, 0x23, 0x93, 0x6a, 0x48 }, { 0x38, 0xdf, 0x58, 0x7, 0x4e, 0x5e, 0x65, 0x65, 0xf2, 0xfc, 0x7c, 0x89, 0xfc, 0x86, 0x50, 0x8e, 0x31, 0x70, 0x2e, 0x44, 0xd0, 0xb, 0xca, 0x86, 0xf0, 0x40, 0x9, 0xa2, 0x30, 0x78, 0x47, 0x4e }, { 0x65, 0xa0, 0xee, 0x39, 0xd1, 0xf7, 0x38, 0x83, 0xf7, 0x5e, 0xe9, 0x37, 0xe4, 0x2c, 0x3a, 0xbd, 0x21, 0x97, 0xb2, 0x26, 0x1, 0x13, 0xf8, 0x6f, 0xa3, 0x44, 0xed, 0xd1, 0xef, 0x9f, 0xde, 0xe7 }, { 0x8b, 0xa0, 0xdf, 0x15, 0x76, 0x25, 0x92, 0xd9, 0x3c, 0x85, 0xf7, 0xf6, 0x12, 0xdc, 0x42, 0xbe, 0xd8, 0xa7, 0xec, 0x7c, 0xab, 0x27, 0xb0, 0x7e, 0x53, 0x8d, 0x7d, 0xda, 0xaa, 0x3e, 0xa8, 0xde }, { 0xaa, 0x25, 0xce, 0x93, 0xbd, 0x2, 0x69, 0xd8, 0x5a, 0xf6, 0x43, 0xfd, 0x1a, 0x73, 0x8, 0xf9, 0xc0, 0x5f, 0xef, 0xda, 0x17, 0x4a, 0x19, 0xa5, 0x97, 0x4d, 
0x66, 0x33, 0x4c, 0xfd, 0x21, 0x6a }, { 0x35, 0xb4, 0x98, 0x31, 0xdb, 0x41, 0x15, 0x70, 0xea, 0x1e, 0xf, 0xbb, 0xed, 0xcd, 0x54, 0x9b, 0x9a, 0xd0, 0x63, 0xa1, 0x51, 0x97, 0x40, 0x72, 0xf6, 0x75, 0x9d, 0xbf, 0x91, 0x47, 0x6f, 0xe2 } };

/* Bit-permutation helpers used by the JH round functions below.
   Each SWAPn swaps adjacent n-bit groups inside the 32-bit word x;
   y is a caller-provided scratch variable and is clobbered. */

/* swap neighbouring 4-bit nibbles of x */
#define SWAP4(x,y)\
y = (x & 0xf0f0f0f0UL); \
x = (x ^ y); \
y = (y >> 4); \
x = (x << 4); \
x= x | y;

/* swap neighbouring 2-bit pairs of x */
#define SWAP2(x,y)\
y = (x & 0xccccccccUL); \
x = (x ^ y); \
y = (y >> 2); \
x = (x << 2); \
x= x | y;

/* swap neighbouring single bits of x (note: x + x == x << 1) */
#define SWAP1(x,y)\
y = (x & 0xaaaaaaaaUL); \
x = (x ^ y); \
y = (y >> 1); \
x = x + x; \
x= x | y;

/* swapping bits 16i||16i+1||......||16i+7 with bits 16i+8||16i+9||......||16i+15 of 32-bit x,
   i.e. a byte swap within each 16-bit half, done with the byte-permute intrinsic */
//#define SWAP8(x) (x) = ((((x) & 0x00ff00ffUL) << 8) | (((x) & 0xff00ff00UL) >> 8));
#define SWAP8(x) (x) = __byte_perm(x, x, 0x2301);
/* swapping bits 32i||32i+1||......||32i+15 with bits 32i+16||32i+17||......||32i+31 of 32-bit x,
   i.e. a 16-bit halfword swap */
//#define SWAP16(x) (x) = ((((x) & 0x0000ffffUL) << 16) | (((x) & 0xffff0000UL) >> 16));
#define SWAP16(x) (x) = __byte_perm(x, x, 0x1032);

/* The MDS (linear diffusion) transform of JH, operating on eight
   bit-sliced 32-bit words in place. */
#define L(m0,m1,m2,m3,m4,m5,m6,m7) \
(m4) ^= (m1); \
(m5) ^= (m2); \
(m6) ^= (m0) ^ (m3); \
(m7) ^= (m0); \
(m0) ^= (m5); \
(m1) ^= (m6); \
(m2) ^= (m4) ^ (m7); \
(m3) ^= (m4);

/* The bit-sliced JH Sbox; cc holds the round-constant bits that select
   between the two 4-bit Sboxes. NOTE: requires a uint32_t temp0 variable
   to be declared in the calling scope. */
#define Sbox(m0,m1,m2,m3,cc) \
m3 = ~(m3); \
m0 ^= ((~(m2)) & (cc)); \
temp0 = (cc) ^ ((m0) & (m1));\
m0 ^= ((m2) & (m3)); \
m3 ^= ((~(m1)) & (m2)); \
m1 ^= ((m0) & (m2)); \
m2 ^= ((m0) & (~(m3))); \
m0 ^= ((m1) | (m3)); \
m3 ^= ((m1) & (m2)); \
m1 ^= (temp0 & (m0)); \
m2 ^= temp0;

/* Apply the Sbox layer and the MDS transform L to each of the four 32-bit
   columns of the 1024-bit bit-sliced state x, using the round constants
   for round `roundnumber` from c_E8_bitslice_roundconstant. */
static __device__ __forceinline__ void Sbox_and_MDS_layer(uint32_t x[8][4], uint32_t roundnumber)
{
	uint32_t temp0;      // scratch required by the Sbox macro
	uint32_t cc0, cc1;   // round-constant words for the two Sbox groups
	//Sbox and MDS layer
#pragma unroll 4
	for (int i = 0; i < 4; i++)
	{
		cc0 = c_E8_bitslice_roundconstant[roundnumber][i];
		cc1 = c_E8_bitslice_roundconstant[roundnumber][i + 4];
		Sbox(x[0][i], x[2][i], x[4][i], x[6][i], cc0);
		Sbox(x[1][i], x[3][i], x[5][i], x[7][i], cc1);
		L(x[0][i], x[2][i], x[4][i], x[6][i], x[1][i], x[3][i], x[5][i], x[7][i]);
	}
}

static
/* RoundFunction0..RoundFunction6: the seven permutation variants of the JH
   round. Each one first applies the Sbox+MDS layer, then permutes only the
   odd-indexed state rows (j = 1,3,5,7) with a swap of increasing width:
   1-, 2-, 4-, 8- and 16-bit swaps within each word (RoundFunction0..4),
   then swaps of adjacent words (RoundFunction5) and of word pairs
   (RoundFunction6). */

/* round variant 0: SWAP1 — exchange neighbouring single bits */
__device__ __forceinline__ void RoundFunction0(uint32_t x[8][4], uint32_t roundnumber)
{
	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
		uint32_t y;   // scratch for the SWAP macro
		SWAP1(x[j][0], y); SWAP1(x[j][1], y); SWAP1(x[j][2], y); SWAP1(x[j][3], y);
	}
}

/* round variant 1: SWAP2 — exchange neighbouring 2-bit pairs */
static __device__ __forceinline__ void RoundFunction1(uint32_t x[8][4], uint32_t roundnumber)
{
	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
		uint32_t y;   // scratch for the SWAP macro
		SWAP2(x[j][0], y); SWAP2(x[j][1], y); SWAP2(x[j][2], y); SWAP2(x[j][3], y);
	}
}

/* round variant 2: SWAP4 — exchange neighbouring nibbles */
static __device__ __forceinline__ void RoundFunction2(uint32_t x[8][4], uint32_t roundnumber)
{
	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
		uint32_t y;   // scratch for the SWAP macro
		SWAP4(x[j][0], y); SWAP4(x[j][1], y); SWAP4(x[j][2], y); SWAP4(x[j][3], y);
	}
}

/* round variant 3: SWAP8 — exchange neighbouring bytes (no scratch needed) */
static __device__ __forceinline__ void RoundFunction3(uint32_t x[8][4], uint32_t roundnumber)
{
	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
#pragma unroll 4
		for (int i = 0; i < 4; i++)
			SWAP8(x[j][i]);
	}
}

/* round variant 4: SWAP16 — exchange 16-bit halfwords */
static __device__ __forceinline__ void RoundFunction4(uint32_t x[8][4], uint32_t roundnumber)
{
	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
#pragma unroll 4
		for (int i = 0; i < 4; i++)
			SWAP16(x[j][i]);
	}
}

/* round variant 5: exchange adjacent 32-bit words (columns 0<->1, 2<->3) */
static __device__ __forceinline__ void RoundFunction5(uint32_t x[8][4], uint32_t roundnumber)
{
	uint32_t temp0;   // also serves as the swap temporary below

	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
#pragma unroll 2
		for (int i = 0; i < 4; i = i + 2)
		{
			temp0 = x[j][i]; x[j][i] = x[j][i + 1]; x[j][i + 1] = temp0;
		}
	}
}

/* round variant 6: exchange 64-bit word pairs (columns {0,1}<->{2,3}) */
static __device__ __forceinline__ void RoundFunction6(uint32_t x[8][4], uint32_t roundnumber)
{
	uint32_t temp0;   // also serves as the swap temporary below

	Sbox_and_MDS_layer(x, roundnumber);

#pragma unroll 4
	for (int j = 1; j < 8; j = j + 2)
	{
#pragma unroll 2
		for (int i = 0; i < 2; i++)
		{
			temp0 = x[j][i]; x[j][i] = x[j][i + 2]; x[j][i + 2] = temp0;
		}
	}
}

/*The bijective function E8, in
bitslice form */
static __device__ __forceinline__ void E8(uint32_t x[8][4])
{
	/* 42 rounds total: 6 passes over the 7 grouped round functions */
	//#pragma unroll 6
	for (int i = 0; i < 42; i += 7)
	{
		RoundFunction0(x, i);
		RoundFunction1(x, i + 1);
		RoundFunction2(x, i + 2);
		RoundFunction3(x, i + 3);
		RoundFunction4(x, i + 4);
		RoundFunction5(x, i + 5);
		RoundFunction6(x, i + 6);
	}
}

/* JH compression function F8: xor the 512-bit message block into the first
   half of the 1024-bit state, run E8, then xor the same block into the
   second half. The state x is updated in place. */
static __device__ __forceinline__ void F8(uint32_t x[8][4], const uint32_t buffer[16])
{
	/*xor the 512-bit message with the first half of the 1024-bit hash state*/
#pragma unroll 16
	for (int i = 0; i < 16; i++)
		x[i >> 2][i & 3] ^= ((uint32_t*)buffer)[i];

	/*the bijective function E8 */
	E8(x);

	/*xor the 512-bit message with the second half of the 1024-bit hash state*/
#pragma unroll 16
	for (int i = 0; i < 16; i++)
		x[(16 + i) >> 2][(16 + i) & 3] ^= ((uint32_t*)buffer)[i];
}

// The hash kernel: each thread hashes one 64-byte (16-word) entry of
// g_hash in place with JH-512. g_nonceVector, when non-NULL, maps the
// thread index to the nonce (and thus hash slot) to process.
__global__ __launch_bounds__(256, 4)
void quark_jh512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *g_hash, uint32_t *g_nonceVector)
{
	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		// nonce comes either from the filter vector or directly from the thread id
		uint32_t nounce = (g_nonceVector != NULL) ?
g_nonceVector[thread] : (startNounce + thread);

		uint32_t hashPosition = nounce - startNounce;        // slot index of this thread's hash
		uint32_t *Hash = &g_hash[16 * hashPosition];         // 16 x 32-bit words = 512-bit entry

		// precomputed initial JH-512 state in bit-sliced form
		// (presumably the standard JH-512 IV — TODO confirm against the JH spec)
		uint32_t x[8][4] = {
			{ 0x964bd16f, 0x17aa003e, 0x052e6a63, 0x43d5157a },
			{ 0x8d5e228a, 0x0bef970c, 0x591234e9, 0x61c3b3f2 },
			{ 0xc1a01d89, 0x1e806f53, 0x6b05a92a, 0x806d2bea },
			{ 0xdbcc8e58, 0xa6ba7520, 0x763a0fa9, 0xf73bf8ba },
			{ 0x05e66901, 0x694ae341, 0x8e8ab546, 0x5ae66f2e },
			{ 0xd0a74710, 0x243c84c1, 0xb1716e3b, 0x99c15a2d },
			{ 0xecf657cf, 0x56f8b19d, 0x7c8806a7, 0x56b11657 },
			{ 0xdffcc2e3, 0xfb1785e6, 0x78465a54, 0x4bdd8ccc } };

		// first compression (F8 inlined): absorb the 512-bit input block
#pragma unroll 16
		for (int i = 0; i < 16; i++)
			x[i >> 2][i & 3] ^= ((uint32_t*)Hash)[i];

		E8(x);

#pragma unroll 16
		for (int i = 0; i < 16; i++)
			x[(16 + i) >> 2][(16 + i) & 3] ^= ((uint32_t*)Hash)[i];

		// second compression: the padding block has only two non-zero words,
		// the 0x80 marker and the message-length field (0x00020000, i.e. a
		// 512-bit message), so only those words are xored in
		x[0 >> 2][0 & 3] ^= 0x80;
		x[15 >> 2][15 & 3] ^= 0x00020000;

		E8(x);

		x[(16 + 0) >> 2][(16 + 0) & 3] ^= 0x80;
		x[(16 + 15) >> 2][(16 + 15) & 3] ^= 0x00020000;

		// the 512-bit digest is the second half of the state (rows 4..7)
		Hash[0] = x[4][0]; Hash[1] = x[4][1]; Hash[2] = x[4][2]; Hash[3] = x[4][3];
		Hash[4] = x[5][0]; Hash[5] = x[5][1]; Hash[6] = x[5][2]; Hash[7] = x[5][3];
		Hash[8] = x[6][0]; Hash[9] = x[6][1]; Hash[10] = x[6][2]; Hash[11] = x[6][3];
		Hash[12] = x[7][0]; Hash[13] = x[7][1]; Hash[14] = x[7][2]; Hash[15] = x[7][3];
	}
}

// The final-round hash kernel: same JH-512 computation, but only one output
// word (Hash[7]) is stored — presumably all that later stages need; verify
// against the callers.
#define TPB2 256
__global__ __launch_bounds__(TPB2, 4)
void quark_jh512_gpu_hash_64_final(uint32_t threads, uint32_t startNounce, uint64_t *const __restrict__ g_hash, const uint32_t *const __restrict__ g_nonceVector)
{
	uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		// nonce comes either from the filter vector or directly from the thread id
		uint32_t nounce = (g_nonceVector != NULL) ?
g_nonceVector[thread] : (startNounce + thread);

		int hashPosition = nounce - startNounce;             // slot index of this thread's hash
		uint32_t *Hash = (uint32_t*)&g_hash[8 * hashPosition];   // 8 x 64-bit = 512-bit entry

		// same precomputed initial JH-512 state as in quark_jh512_gpu_hash_64
		uint32_t x[8][4] = {
			{ 0x964bd16f, 0x17aa003e, 0x052e6a63, 0x43d5157a },
			{ 0x8d5e228a, 0x0bef970c, 0x591234e9, 0x61c3b3f2 },
			{ 0xc1a01d89, 0x1e806f53, 0x6b05a92a, 0x806d2bea },
			{ 0xdbcc8e58, 0xa6ba7520, 0x763a0fa9, 0xf73bf8ba },
			{ 0x05e66901, 0x694ae341, 0x8e8ab546, 0x5ae66f2e },
			{ 0xd0a74710, 0x243c84c1, 0xb1716e3b, 0x99c15a2d },
			{ 0xecf657cf, 0x56f8b19d, 0x7c8806a7, 0x56b11657 },
			{ 0xdffcc2e3, 0xfb1785e6, 0x78465a54, 0x4bdd8ccc } };

		// absorb the 512-bit input block
		F8(x, Hash);

		// padding block: only the 0x80 marker (word 0) and the 512-bit
		// message-length field (word 15 = x[3][3]) are non-zero
		x[0][0] ^= 0x80U;
		x[3][3] ^= 0x00020000U;

		// E8 inlined: 6 passes over the 7 grouped round functions = 42 rounds.
		// The trailing second-half xor of the padding block is skipped because
		// it would only touch state words 16 and 31, not the word read below.
		for (int i = 0; i < 42; i += 7)
		{
			RoundFunction0(x, i);
			RoundFunction1(x, i + 1);
			RoundFunction2(x, i + 2);
			RoundFunction3(x, i + 3);
			RoundFunction4(x, i + 4);
			RoundFunction5(x, i + 5);
			RoundFunction6(x, i + 6);
		}

		// store only digest word 7 (state word 23)
		Hash[7] = x[5][3];
	}
}

// Host launcher: runs quark_jh512_gpu_hash_64 over `threads` hash entries.
// NOTE(review): the kernel is compiled with __launch_bounds__(256, 4) but is
// launched here with 32-thread blocks — confirm this configuration is intended.
__host__ void quark_jh512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash)
{
	const uint32_t threadsperblock = 32;

	// compute how many thread blocks we need (ceiling division)
	dim3 grid((threads + threadsperblock-1)/threadsperblock);
	dim3 block(threadsperblock);

	quark_jh512_gpu_hash_64<<<grid, block>>>(threads, startNounce, d_hash, d_nonceVector);
}

// Setup functions
// No per-device state to initialize for this algorithm; kept for interface symmetry.
__host__ void quark_jh512_cpu_init(int thr_id, uint32_t threads)
{
}

// Host launcher for the final-round variant (writes only Hash[7] per entry).
__host__ void quark_jh512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash)
{
	dim3 grid((threads + TPB2 - 1) / TPB2);
	dim3 block(TPB2);

	quark_jh512_gpu_hash_64_final << <grid, block >> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector);
}
the_stack
__global__ void kernel(){ //====================================================================================================================================================== // COMMON VARIABLES //====================================================================================================================================================== fp* d_in; int rot_row; int rot_col; int in2_rowlow; int in2_collow; int ic; int jc; int jp1; int ja1, ja2; int ip1; int ia1, ia2; int ja, jb; int ia, ib; float s; int i; int j; int row; int col; int ori_row; int ori_col; int position; float sum; int pos_ori; float temp; float temp2; int location; int cent; int tMask_row; int tMask_col; float largest_value_current = 0; float largest_value = 0; int largest_coordinate_current = 0; int largest_coordinate = 0; float fin_max_val = 0; int fin_max_coo = 0; int largest_row; int largest_col; int offset_row; int offset_col; __shared__ float in_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_sqr_partial_sum[51]; // WATCH THIS !!! HARDCODED VALUE __shared__ float in_final_sum; __shared__ float in_sqr_final_sum; float mean; float mean_sqr; float variance; float deviation; __shared__ float denomT; __shared__ float par_max_val[131]; // WATCH THIS !!! HARDCODED VALUE __shared__ int par_max_coo[131]; // WATCH THIS !!! 
HARDCODED VALUE int pointer; __shared__ float d_in_mod_temp[2601]; int ori_pointer; int loc_pointer; //====================================================================================================================================================== // THREAD PARAMETERS //====================================================================================================================================================== int bx = blockIdx.x; // get current horizontal block index (0-n) int tx = threadIdx.x; // get current horizontal thread index (0-n) int ei_new; //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // GENERATE TEMPLATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // generate templates based on the first frame only if(d_common_change.frame_no == 0){ //====================================================================================================================================================== // GET POINTER TO TEMPLATE FOR THE POINT //====================================================================================================================================================== // pointers to: current template for current point d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; 
//====================================================================================================================================================== // UPDATE ROW LOC AND COL LOC //====================================================================================================================================================== // uptade temporary endo/epi row/col coordinates (in each block corresponding to point, narrow work to one thread) ei_new = tx; if(ei_new == 0){ // update temporary row/col coordinates pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no]; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no]; } //====================================================================================================================================================== // CREATE TEMPLATES //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[col*d_common.in_rows+row] = d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } 
//=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // PROCESS POINTS //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // process points in all frames except for the first one if(d_common_change.frame_no != 0){ //====================================================================================================================================================== // SELECTION //====================================================================================================================================================== in2_rowlow = d_unique[bx].d_Row[d_unique[bx].point_no] - d_common.sSize; // (1 to n+1) in2_collow = d_unique[bx].d_Col[d_unique[bx].point_no] - d_common.sSize; // work ei_new = tx; while(ei_new < d_common.in2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_rows == 0){ row = d_common.in2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + in2_rowlow - 1; ori_col = col + in2_collow - 1; d_unique[bx].d_in2[ei_new] = d_common_change.d_frame[ori_col*d_common.frame_rows+ori_row]; // go for second round ei_new = 
ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CONVOLUTION //====================================================================================================================================================== //==================================================================================================== // ROTATION //==================================================================================================== // variables d_in = &d_unique[bx].d_T[d_unique[bx].in_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // execution rot_row = (d_common.in_rows-1) - row; rot_col = (d_common.in_rows-1) - col; d_in_mod_temp[ei_new] = d_in[rot_col*d_common.in_rows+rot_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // ACTUAL CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < 
d_common.conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.conv_rows; // (1-n) jc = (ei_new+1) / d_common.conv_rows + 1; // (1-n) if((ei_new+1) % d_common.conv_rows == 0){ ic = d_common.conv_rows; jc = jc-1; } // j = jc + d_common.joffset; jp1 = j + 1; if(d_common.in2_cols < jp1){ ja1 = jp1 - d_common.in2_cols; } else{ ja1 = 1; } if(d_common.in_cols < j){ ja2 = d_common.in_cols; } else{ ja2 = j; } i = ic + d_common.ioffset; ip1 = i + 1; if(d_common.in2_rows < ip1){ ia1 = ip1 - d_common.in2_rows; } else{ ia1 = 1; } if(d_common.in_rows < i){ ia2 = d_common.in_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_in_mod_temp[d_common.in_rows*(ja-1)+ia-1] * d_unique[bx].d_in2[d_common.in2_rows*(jb-1)+ib-1]; } } //d_unique[bx].d_conv[d_common.conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_conv[ei_new] = s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM //====================================================================================================================================================== //==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PADD ARRAY 
//================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2[ori_col*d_common.in2_rows+ori_row]; } else{ // do if otherwise d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); 
//==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new 
matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // HORIZONTAL CUMULATIVE SUM //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 
//==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sub2[ei_new] = 
d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // CUMULATIVE SUM 2 //====================================================================================================================================================== //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sqr_elem){ temp = d_unique[bx].d_in2[ei_new]; d_unique[bx].d_in2_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); 
//==================================================================================================== // PAD ARRAY, VERTICAL CUMULATIVE SUM //==================================================================================================== //================================================== // PAD ARRAY //================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_elem){ // figure out row/col location in padded array row = (ei_new+1) % d_common.in2_pad_cumv_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_rows == 0){ row = d_common.in2_pad_cumv_rows - 1; col = col-1; } // execution if( row > (d_common.in2_pad_add_rows-1) && // do if has numbers in original array row < (d_common.in2_pad_add_rows+d_common.in2_sqr_rows) && col > (d_common.in2_pad_add_cols-1) && col < (d_common.in2_pad_add_cols+d_common.in2_sqr_cols)){ ori_row = row - d_common.in2_pad_add_rows; ori_col = col - d_common.in2_pad_add_cols; d_unique[bx].d_in2_pad_cumv[ei_new] = d_unique[bx].d_in2_sqr[ori_col*d_common.in2_sqr_rows+ori_row]; } else{ // do if otherwise d_unique[bx].d_in2_pad_cumv[ei_new] = 0; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // VERTICAL CUMULATIVE SUM //================================================== //work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_cols){ // figure out column position pos_ori = ei_new*d_common.in2_pad_cumv_rows; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_pad_cumv_rows; position = position + 1){ d_unique[bx].d_in2_pad_cumv[position] = d_unique[bx].d_in2_pad_cumv[position] + sum; sum = d_unique[bx].d_in2_pad_cumv[position]; } // go for second round 
ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_pad_cumv_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_pad_cumv_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_pad_cumv_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_pad_cumv_sel_rows == 0){ row = d_common.in2_pad_cumv_sel_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel_collow - 1; d_unique[bx].d_in2_pad_cumv_sel[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // figure out row/col location in new matrix row = (ei_new+1) % 
d_common.in2_sub_cumh_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_rows == 0){ row = d_common.in2_sub_cumh_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_pad_cumv_sel2_rowlow - 1; ori_col = col + d_common.in2_pad_cumv_sel2_collow - 1; d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv[ori_col*d_common.in2_pad_cumv_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_elem){ // subtract d_unique[bx].d_in2_sub_cumh[ei_new] = d_unique[bx].d_in2_pad_cumv_sel[ei_new] - d_unique[bx].d_in2_sub_cumh[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // HORIZONTAL CUMULATIVE SUM //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_rows){ // figure out row position pos_ori = ei_new; // variables sum = 0; // loop through all rows for(position = pos_ori; position < pos_ori+d_common.in2_sub_cumh_elem; position = position + d_common.in2_sub_cumh_rows){ d_unique[bx].d_in2_sub_cumh[position] = d_unique[bx].d_in2_sub_cumh[position] + sum; sum = d_unique[bx].d_in2_sub_cumh[position]; } // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); 
//==================================================================================================== // SELECTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub_cumh_sel_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub_cumh_sel_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub_cumh_sel_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub_cumh_sel_rows == 0){ row = d_common.in2_sub_cumh_sel_rows - 1; col = col - 1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + d_common.in2_sub_cumh_sel_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel_collow - 1; d_unique[bx].d_in2_sub_cumh_sel[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // SELECTION 2, SUBTRACTION //==================================================================================================== //================================================== // SELECTION 2 //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in2_sub2_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in2_sub2_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in2_sub2_rows == 0){ row = d_common.in2_sub2_rows - 1; col = col-1; } // figure out corresponding location in old matrix and copy values to new matrix ori_row = row + 
d_common.in2_sub_cumh_sel2_rowlow - 1; ori_col = col + d_common.in2_sub_cumh_sel2_collow - 1; d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh[ori_col*d_common.in2_sub_cumh_rows+ori_row]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //================================================== // SYNCHRONIZE THREADS //================================================== __syncthreads(); //================================================== // SUBTRACTION //================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ // subtract d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sub_cumh_sel[ei_new] - d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // FINAL //====================================================================================================================================================== //==================================================================================================== // DENOMINATOR A SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ temp = d_unique[bx].d_in2_sub2[ei_new]; temp2 = d_unique[bx].d_in2_sqr_sub2[ei_new] - (temp * temp / d_common.in_elem); if(temp2 < 0){ temp2 = 0; } d_unique[bx].d_in2_sqr_sub2[ei_new] = sqrt(temp2); // go for second round ei_new = ei_new + 
NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // MULTIPLICATION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_sqr_elem){ temp = d_in[ei_new]; d_unique[bx].d_in_sqr[ei_new] = temp * temp; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN SUM //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in_cols){ sum = 0; for(i = 0; i < d_common.in_rows; i++){ sum = sum + d_in[ei_new*d_common.in_rows+i]; } in_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // IN_SQR SUM //==================================================================================================== ei_new = tx; while(ei_new < d_common.in_sqr_rows){ sum = 0; for(i = 0; i < d_common.in_sqr_cols; i++){ sum = sum + 
d_unique[bx].d_in_sqr[ei_new+d_common.in_sqr_rows*i]; } in_sqr_partial_sum[ei_new] = sum; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // FINAL SUMMATION //==================================================================================================== if(tx == 0){ in_final_sum = 0; for(i = 0; i<d_common.in_cols; i++){ in_final_sum = in_final_sum + in_partial_sum[i]; } }else if(tx == 1){ in_sqr_final_sum = 0; for(i = 0; i<d_common.in_sqr_cols; i++){ in_sqr_final_sum = in_sqr_final_sum + in_sqr_partial_sum[i]; } } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // DENOMINATOR T //==================================================================================================== if(tx == 0){ mean = in_final_sum / d_common.in_elem; // gets mean (average) value of element in ROI mean_sqr = mean * mean; variance = (in_sqr_final_sum / d_common.in_elem) - mean_sqr; // gets variance of ROI deviation = sqrt(variance); // gets standard deviation of ROI denomT = sqrt(float(d_common.in_elem-1))*deviation; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); 
//==================================================================================================== // DENOMINATOR SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * denomT; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // NUMERATOR SAVE RESULT IN CONVOLUTION //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.conv_elem){ d_unique[bx].d_conv[ei_new] = d_unique[bx].d_conv[ei_new] - d_unique[bx].d_in2_sub2[ei_new] * in_final_sum / d_common.in_elem; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // CORRELATION SAVE RESULT IN CUMULATIVE SUM A2 //==================================================================================================== // work ei_new = tx; while(ei_new < d_common.in2_sub2_elem){ d_unique[bx].d_in2_sqr_sub2[ei_new] = d_unique[bx].d_conv[ei_new] / d_unique[bx].d_in2_sqr_sub2[ei_new]; // go for second round ei_new = ei_new + NUMBER_THREADS; } 
//====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // TEMPLATE MASK CREATE //====================================================================================================================================================== cent = d_common.sSize + d_common.tSize + 1; if(d_common_change.frame_no == 0){ tMask_row = cent + d_unique[bx].d_Row[d_unique[bx].point_no] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_Col[d_unique[bx].point_no] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } else{ pointer = d_common_change.frame_no-1+d_unique[bx].point_no*d_common.no_frames; tMask_row = cent + d_unique[bx].d_tRowLoc[pointer] - d_unique[bx].d_Row[d_unique[bx].point_no] - 1; tMask_col = cent + d_unique[bx].d_tColLoc[pointer] - d_unique[bx].d_Col[d_unique[bx].point_no] - 1; } //work ei_new = tx; while(ei_new < d_common.tMask_elem){ location = tMask_col*d_common.tMask_rows + tMask_row; if(ei_new==location){ d_unique[bx].d_tMask[ei_new] = 1; } else{ d_unique[bx].d_tMask[ei_new] = 0; } //go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); 
//====================================================================================================================================================== // MASK CONVOLUTION //====================================================================================================================================================== // work ei_new = tx; while(ei_new < d_common.mask_conv_elem){ // figure out row/col location in array ic = (ei_new+1) % d_common.mask_conv_rows; // (1-n) jc = (ei_new+1) / d_common.mask_conv_rows + 1; // (1-n) if((ei_new+1) % d_common.mask_conv_rows == 0){ ic = d_common.mask_conv_rows; jc = jc-1; } // j = jc + d_common.mask_conv_joffset; jp1 = j + 1; if(d_common.mask_cols < jp1){ ja1 = jp1 - d_common.mask_cols; } else{ ja1 = 1; } if(d_common.tMask_cols < j){ ja2 = d_common.tMask_cols; } else{ ja2 = j; } i = ic + d_common.mask_conv_ioffset; ip1 = i + 1; if(d_common.mask_rows < ip1){ ia1 = ip1 - d_common.mask_rows; } else{ ia1 = 1; } if(d_common.tMask_rows < i){ ia2 = d_common.tMask_rows; } else{ ia2 = i; } s = 0; for(ja=ja1; ja<=ja2; ja++){ jb = jp1 - ja; for(ia=ia1; ia<=ia2; ia++){ ib = ip1 - ia; s = s + d_unique[bx].d_tMask[d_common.tMask_rows*(ja-1)+ia-1] * 1; } } // //d_unique[bx].d_mask_conv[d_common.mask_conv_rows*(jc-1)+ic-1] = s; d_unique[bx].d_mask_conv[ei_new] = d_unique[bx].d_in2_sqr_sub2[ei_new] * s; // go for second round ei_new = ei_new + NUMBER_THREADS; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); //====================================================================================================================================================== // MAXIMUM VALUE 
//====================================================================================================================================================== //==================================================================================================== // INITIAL SEARCH //==================================================================================================== ei_new = tx; while(ei_new < d_common.mask_conv_rows){ for(i=0; i<d_common.mask_conv_cols; i++){ largest_coordinate_current = ei_new*d_common.mask_conv_rows+i; largest_value_current = abs(d_unique[bx].d_mask_conv[largest_coordinate_current]); if(largest_value_current > largest_value){ largest_coordinate = largest_coordinate_current; largest_value = largest_value_current; } } par_max_coo[ei_new] = largest_coordinate; par_max_val[ei_new] = largest_value; // go for second round ei_new = ei_new + NUMBER_THREADS; } //==================================================================================================== // SYNCHRONIZE THREADS //==================================================================================================== __syncthreads(); //==================================================================================================== // FINAL SEARCH //==================================================================================================== if(tx == 0){ for(i = 0; i < d_common.mask_conv_rows; i++){ if(par_max_val[i] > fin_max_val){ fin_max_val = par_max_val[i]; fin_max_coo = par_max_coo[i]; } } // convert coordinate to row/col form largest_row = (fin_max_coo+1) % d_common.mask_conv_rows - 1; // (0-n) row largest_col = (fin_max_coo+1) / d_common.mask_conv_rows; // (0-n) column if((fin_max_coo+1) % d_common.mask_conv_rows == 0){ largest_row = d_common.mask_conv_rows - 1; largest_col = largest_col - 1; } // calculate offset largest_row = largest_row + 1; // compensate to match MATLAB format (1-n) largest_col = largest_col + 1; // compensate to match MATLAB format (1-n) 
offset_row = largest_row - d_common.in_rows - (d_common.sSize - d_common.tSize); offset_col = largest_col - d_common.in_cols - (d_common.sSize - d_common.tSize); pointer = d_common_change.frame_no+d_unique[bx].point_no*d_common.no_frames; d_unique[bx].d_tRowLoc[pointer] = d_unique[bx].d_Row[d_unique[bx].point_no] + offset_row; d_unique[bx].d_tColLoc[pointer] = d_unique[bx].d_Col[d_unique[bx].point_no] + offset_col; } //====================================================================================================================================================== // SYNCHRONIZE THREADS //====================================================================================================================================================== __syncthreads(); } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // COORDINATE AND TEMPLATE UPDATE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // time19 = clock(); // if the last frame in the bath, update template if(d_common_change.frame_no != 0 && (d_common_change.frame_no)%10 == 0){ // update coordinate loc_pointer = d_unique[bx].point_no*d_common.no_frames+d_common_change.frame_no; d_unique[bx].d_Row[d_unique[bx].point_no] = d_unique[bx].d_tRowLoc[loc_pointer]; 
d_unique[bx].d_Col[d_unique[bx].point_no] = d_unique[bx].d_tColLoc[loc_pointer]; // work ei_new = tx; while(ei_new < d_common.in_elem){ // figure out row/col location in new matrix row = (ei_new+1) % d_common.in_rows - 1; // (0-n) row col = (ei_new+1) / d_common.in_rows + 1 - 1; // (0-n) column if((ei_new+1) % d_common.in_rows == 0){ row = d_common.in_rows - 1; col = col-1; } // figure out row/col location in corresponding new template area in image and give to every thread (get top left corner and progress down and right) ori_row = d_unique[bx].d_Row[d_unique[bx].point_no] - 25 + row - 1; ori_col = d_unique[bx].d_Col[d_unique[bx].point_no] - 25 + col - 1; ori_pointer = ori_col*d_common.frame_rows+ori_row; // update template d_in[ei_new] = d_common.alpha*d_in[ei_new] + (1.00-d_common.alpha)*d_common_change.d_frame[ori_pointer]; // go for second round ei_new = ei_new + NUMBER_THREADS; } } } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // END OF FUNCTION //=============================================================================================================================================================================================================== //===============================================================================================================================================================================================================
the_stack
#define LBANN_SLICE_LAYER_INSTANTIATE
#include "lbann/layers/transform/slice.hpp"
#include "lbann/utils/gpu/helpers.hpp"

namespace lbann {

namespace {

// 4-element dimension/stride descriptor that can be passed by value to kernels.
using dim4 = gpu_lib::array<size_t, 4>;

/** Concatenate multiple 4-D tensors into one output tensor.
 *
 *  Threads cooperate over dims 1-3 with grid-stride loops; dim 0 is
 *  traversed serially by every thread. Each input tensor j is copied
 *  into the output starting at output_offset_list[j].
 *
 *  Block dimensions: bsize x 1 x 1
 *
 *  Grid dimensions: (max_input_dims[3] / bsize) x max_input_dims[2] x max_input_dims[1]
 */
template <typename T>
__global__ void concat4d_kernel(
  size_t num_inputs,
  const T* __restrict__ * __restrict__ input_buffer_list,
  const dim4* __restrict__ input_dims_list,
  const dim4* __restrict__ input_strides_list,
  T* __restrict__ output_buffer,
  dim4 output_strides,
  const size_t* __restrict__ output_offset_list) {

  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = gridDim.x * blockDim.x;
  const size_t nthreadsy = gridDim.y * blockDim.y;
  const size_t nthreadsz = gridDim.z * blockDim.z;

  for (size_t j=0; j<num_inputs; ++j) {

    // Current input tensor
    const auto& input_buffer = input_buffer_list[j];
    const auto& input_dims = input_dims_list[j];
    const auto& input_strides = input_strides_list[j];
    const auto& output_offset = output_offset_list[j];

    // Copy from input tensor to output tensor
    for (size_t i0=0; i0<input_dims[0]; ++i0) {
      for (size_t i1=gidz; i1<input_dims[1]; i1+=nthreadsz) {
        for (size_t i2=gidy; i2<input_dims[2]; i2+=nthreadsy) {
          for (size_t i3=gidx; i3<input_dims[3]; i3+=nthreadsx) {
            const auto& x = input_buffer[i0 * input_strides[0]
                                         + i1 * input_strides[1]
                                         + i2 * input_strides[2]
                                         + i3 * input_strides[3]];
            auto& y = output_buffer[output_offset
                                    + i0 * output_strides[0]
                                    + i1 * output_strides[1]
                                    + i2 * output_strides[2]
                                    + i3 * output_strides[3]];
            y = x;
          }
        }
      }
    }

  }

}

/** Slice one 4-D input tensor into multiple output tensors.
 *
 *  Inverse of concat4d_kernel: output tensor j is read from the input
 *  starting at input_offset_list[j]. Threads cooperate over dims 1-3
 *  with grid-stride loops; dim 0 is traversed serially.
 *
 *  Block dimensions: bsize x 1 x 1
 *
 *  Grid dimensions: (max_output_dims[3] / bsize) x max_output_dims[2] x max_output_dims[1]
 *
 */
template <typename T>
__global__ void slice4d_kernel(
  size_t num_outputs,
  const T* __restrict__ input_buffer,
  dim4 input_strides,
  const size_t* __restrict__ input_offset_list,
  T* __restrict__ * __restrict__ output_buffer_list,
  const dim4* __restrict__ output_dims_list,
  const dim4* __restrict__ output_strides_list) {

  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = gridDim.x * blockDim.x;
  const size_t nthreadsy = gridDim.y * blockDim.y;
  const size_t nthreadsz = gridDim.z * blockDim.z;

  for (size_t j=0; j<num_outputs; ++j) {

    // Current output tensor
    const auto& input_offset = input_offset_list[j];
    auto& output_buffer = output_buffer_list[j];
    const auto& output_dims = output_dims_list[j];
    const auto& output_strides = output_strides_list[j];

    // Copy from input tensor to output tensor
    for (size_t i0=0; i0<output_dims[0]; ++i0) {
      for (size_t i1=gidz; i1<output_dims[1]; i1+=nthreadsz) {
        for (size_t i2=gidy; i2<output_dims[2]; i2+=nthreadsy) {
          for (size_t i3=gidx; i3<output_dims[3]; i3+=nthreadsx) {
            const auto& x = input_buffer[input_offset
                                         + i0 * input_strides[0]
                                         + i1 * input_strides[1]
                                         + i2 * input_strides[2]
                                         + i3 * input_strides[3]];
            auto& y = output_buffer[i0 * output_strides[0]
                                    + i1 * output_strides[1]
                                    + i2 * output_strides[2]
                                    + i3 * output_strides[3]];
            y = x;
          }
        }
      }
    }

  }

}

} // namespace <anon>

// Forward prop (model-parallel): nothing to do here.
template <typename TensorDataType>
void fp_compute_impl(
  slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) {
  // Tensor views have already been setup in fp_setup_outputs
}

// Backward prop (model-parallel): copy each child's gradient into the
// matching row range of the error-signal matrix via Elemental views.
template <typename TensorDataType>
void bp_compute_impl(
  slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) {

  // Stack Elemental matrices on top of each other
  // Note: Assume each mini-batch sample is flat.
  auto& input_grad = l.get_error_signals();
  std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> input_grad_v(
    input_grad.Construct(input_grad.Grid(), input_grad.Root()));
  size_t offset = l.m_slice_points.front();
  for (size_t j=0; j<static_cast<size_t>(l.get_num_children()); ++j) {
    const auto& output_grad = l.get_prev_error_signals(j);
    // View the row range [offset, offset+height) of the parent gradient
    // and copy the child's gradient into it.
    El::View(*input_grad_v, input_grad,
             El::IR(offset, offset+output_grad.Height()), El::ALL);
    El::Copy(output_grad, *input_grad_v);
    offset += output_grad.Height();
  }

}

// Forward prop (data-parallel): pack per-output metadata (buffers, dims,
// strides, offsets) into one workspace buffer, upload it, and launch
// slice4d_kernel to scatter the input tensor into the child outputs.
template <typename TensorDataType>
void fp_compute_impl(
  slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) {

  // Check that number of dimensions is valid
  /// @todo Support tensors with arbitrary number of dimensions
  const auto& input_dims = l.get_input_dims();
  const size_t num_dims = input_dims.size();
  if (num_dims > 3) {
    LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ",
                "is operating on ",num_dims,"-D tensors, ",
                "but only 3-D tensors are currently supported");
  }

  // Get synchronization info from input tensor
  using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>;
  const auto& input = l.get_prev_activations();
  const auto& local_input = dynamic_cast<const LocalMatrix&>(input.LockedMatrix());
  auto sync_info = gpu::get_sync_info(local_input);

  // Get dimensions and strides for each output tensor
  const size_t num_outputs = l.get_num_children();
  std::vector<TensorDataType*> output_buffer_list;
  std::vector<dim4> output_dims_list, output_strides_list;
  dim4 max_output_dims{0,0,0,0};
  for (size_t j=0; j<num_outputs; ++j) {
    auto& output = l.get_activations(j);
    const auto& output_dims = l.get_output_dims(j);
    // Construct dimensions and strides in reverse order
    // Note: Assume each mini-batch sample is fully packed.
    std::vector<size_t> rdims(output_dims.rbegin(), output_dims.rend());
    std::vector<size_t> rstrides(output_dims.size(), 1);
    for (size_t d=1; d<output_dims.size(); ++d) {
      rstrides[d] = rdims[d-1] * rstrides[d-1];
    }
    // Mini-batch dimension: width/leading-dim of the local matrix.
    rdims.push_back(output.LocalWidth());
    rstrides.push_back(output.LDim());
    // Pad tensor dimensions to 4D
    rdims.resize(4, 1);
    rstrides.resize(4, rstrides.back());
    output_buffer_list.push_back(output.Buffer());
    output_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]});
    output_strides_list.push_back(
      {rstrides[3], rstrides[2], rstrides[1], rstrides[0]});
    for (size_t i=0; i<4; ++i) {
      max_output_dims[i] = std::max(max_output_dims[i], rdims[3-i]);
    }
  }

  // Get strides for input tensor
  dim4 input_strides;
  {
    // Construct dimensions and strides in reverse order
    // Note: Assume each mini-batch sample is fully packed.
    std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend());
    std::vector<size_t> rstrides(input_dims.size(), 1);
    for (size_t d=1; d<input_dims.size(); ++d) {
      rstrides[d] = rdims[d-1] * rstrides[d-1];
    }
    rdims.push_back(local_input.Width());
    rstrides.push_back(local_input.LDim());
    // Pad tensor dimensions to 4D
    rdims.resize(4, 1);
    rstrides.resize(4, rstrides.back());
    input_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]};
  }

  // Compute each output tensor's offset in input tensor
  // (slice dim index is shifted by the 4-D padding applied above)
  const size_t slice_dim_stride = input_strides[l.m_slice_dim+(4-num_dims)];
  std::vector<size_t> input_offset_list;
  for (const auto& slice_point : l.m_slice_points) {
    input_offset_list.push_back(slice_point * slice_dim_stride);
  }

  // Pack tensor data into a CPU buffer
  // (wait for any previous async use of the workspace before reusing it)
  l.m_workspace_event.synchronize();
  l.m_workspace.resize(
    sizeof(size_t) * input_offset_list.size()
    + sizeof(TensorDataType*) * output_buffer_list.size()
    + sizeof(dim4) * output_dims_list.size()
    + sizeof(dim4) * output_strides_list.size());
  size_t pos = 0;
  std::memcpy(&l.m_workspace[pos], input_offset_list.data(),
              sizeof(size_t) * input_offset_list.size());
  pos += sizeof(size_t) * input_offset_list.size();
  std::memcpy(&l.m_workspace[pos], output_buffer_list.data(),
              sizeof(TensorDataType*) * output_buffer_list.size());
  pos += sizeof(TensorDataType*) * output_buffer_list.size();
  std::memcpy(&l.m_workspace[pos], output_dims_list.data(),
              sizeof(dim4) * output_dims_list.size());
  pos += sizeof(dim4) * output_dims_list.size();
  std::memcpy(&l.m_workspace[pos], output_strides_list.data(),
              sizeof(dim4) * output_strides_list.size());
  pos += sizeof(dim4) * output_strides_list.size();

  // Copy tensor data to GPU
  hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace(
    l.m_workspace.size(), sync_info);
  unsigned char* device_workspace_ptr = device_workspace.data();
  hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr,
                                l.m_workspace.size(), sync_info);
  l.m_workspace_event.record(sync_info.Stream());
  // Carve device pointers out of the uploaded workspace, in the same
  // order the host buffer was packed above.
  pos = 0;
  auto&& device_input_offset_list
    = reinterpret_cast<const size_t*>(device_workspace_ptr+pos);
  pos += sizeof(size_t) * input_offset_list.size();
  auto&& device_output_buffer_list
    = reinterpret_cast<TensorDataType**>(device_workspace_ptr+pos);
  pos += sizeof(TensorDataType*) * output_buffer_list.size();
  auto&& device_output_dims_list
    = reinterpret_cast<const dim4*>(device_workspace_ptr+pos);
  pos += sizeof(dim4) * output_dims_list.size();
  auto&& device_output_strides_list
    = reinterpret_cast<const dim4*>(device_workspace_ptr+pos);
  pos += sizeof(dim4) * output_strides_list.size();

  // Launch GPU kernel
  const auto& max_output_size = (max_output_dims[0] * max_output_dims[1]
                                 * max_output_dims[2] * max_output_dims[3]);
  if (max_output_size > 0) {
    constexpr size_t block_size = 64;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (max_output_dims[3] + block_size - 1) / block_size;
    grid_dims.y = max_output_dims[2];
    grid_dims.z = max_output_dims[1];
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      slice4d_kernel<TensorDataType>,
      grid_dims, block_dims, 0, sync_info,
      num_outputs,
      local_input.LockedBuffer(),
      input_strides,
      device_input_offset_list,
      device_output_buffer_list,
      device_output_dims_list,
      device_output_strides_list);
  }

}

// Backward prop (data-parallel): gather the child gradients back into the
// error-signal tensor (mirror of fp_compute_impl above, using
// concat4d_kernel).
template <typename TensorDataType>
void bp_compute_impl(
  slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) {

  // Check that number of dimensions is valid
  /// @todo Support tensors with arbitrary number of dimensions
  const auto& input_dims = l.get_input_dims();
  const size_t num_dims = input_dims.size();
  if (num_dims > 3) {
    LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ",
                "is operating on ",num_dims,"-D tensors, ",
                "but only 3-D tensors are currently supported");
  }

  // Get synchronization info from input gradient tensor
  using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>;
  auto& input_grad = l.get_error_signals();
  auto& local_input_grad = dynamic_cast<LocalMatrix&>(input_grad.Matrix());
  auto sync_info = gpu::get_sync_info(local_input_grad);

  // Get dimensions and strides for each output gradient tensor
  const size_t num_outputs = l.get_num_children();
  std::vector<const TensorDataType*> output_grad_buffer_list;
  std::vector<dim4> output_grad_dims_list, output_grad_strides_list;
  dim4 max_output_grad_dims{0,0,0,0};
  for (size_t j=0; j<num_outputs; ++j) {
    const auto& output_grad = l.get_prev_error_signals(j);
    const auto& output_grad_dims = l.get_output_dims(j);
    // Construct dimensions and strides in reverse order
    // Note: Assume each mini-batch sample is fully packed.
std::vector<size_t> rdims(output_grad_dims.rbegin(), output_grad_dims.rend()); std::vector<size_t> rstrides(output_grad_dims.size(), 1); for (size_t d=1; d<output_grad_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(output_grad.LocalWidth()); rstrides.push_back(output_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); output_grad_buffer_list.push_back(output_grad.LockedBuffer()); output_grad_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]}); output_grad_strides_list.push_back( {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}); for (size_t i=0; i<4; ++i) { max_output_grad_dims[i] = std::max(max_output_grad_dims[i], rdims[3-i]); } } // Get strides for input gradient tensor dim4 input_grad_strides; { // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend()); std::vector<size_t> rstrides(input_dims.size(), 1); for (size_t d=1; d<input_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(local_input_grad.Width()); rstrides.push_back(local_input_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); input_grad_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}; } // Compute offsets in input gradient tensor const size_t slice_dim_stride = input_grad_strides[l.m_slice_dim+(4-num_dims)]; std::vector<size_t> input_grad_offset_list; for (const auto& slice_point : l.m_slice_points) { input_grad_offset_list.push_back(slice_point * slice_dim_stride); } // Pack tensor data into a CPU buffer l.m_workspace_event.synchronize(); l.m_workspace.resize( sizeof(TensorDataType*) * output_grad_buffer_list.size() + sizeof(dim4) * output_grad_dims_list.size() + sizeof(dim4) * output_grad_strides_list.size() + sizeof(size_t) * input_grad_offset_list.size()); size_t pos = 0; 
std::memcpy(&l.m_workspace[pos], output_grad_buffer_list.data(), sizeof(TensorDataType*) * output_grad_buffer_list.size()); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_dims_list.data(), sizeof(dim4) * output_grad_dims_list.size()); pos += sizeof(dim4) * output_grad_dims_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_strides_list.data(), sizeof(dim4) * output_grad_strides_list.size()); pos += sizeof(dim4) * output_grad_strides_list.size(); std::memcpy(&l.m_workspace[pos], input_grad_offset_list.data(), sizeof(size_t) * input_grad_offset_list.size()); pos += sizeof(size_t) * input_grad_offset_list.size(); // Copy tensor data to GPU hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace( l.m_workspace.size(), sync_info); unsigned char* device_workspace_ptr = device_workspace.data(); hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr, l.m_workspace.size(), sync_info); l.m_workspace_event.record(sync_info.Stream()); pos = 0; auto&& device_output_grad_buffer_list = reinterpret_cast<const TensorDataType**>(device_workspace_ptr+pos); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); auto&& device_output_grad_dims_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_dims_list.size(); auto&& device_output_grad_strides_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_strides_list.size(); auto&& device_input_grad_offset_list = reinterpret_cast<const size_t*>(device_workspace_ptr+pos); pos += sizeof(size_t) * input_grad_offset_list.size(); // Launch GPU kernel const auto& max_output_grad_size = (max_output_grad_dims[0] * max_output_grad_dims[1] * max_output_grad_dims[2] * max_output_grad_dims[3]); if (max_output_grad_size > 0) { constexpr size_t block_size = 64; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (max_output_grad_dims[3] + 
block_size - 1) / block_size; grid_dims.y = max_output_grad_dims[2]; grid_dims.z = max_output_grad_dims[1]; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( concat4d_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, num_outputs, device_output_grad_buffer_list, device_output_grad_dims_list, device_output_grad_strides_list, local_input_grad.Buffer(), input_grad_strides, device_input_grad_offset_list); } } // Explicit instantiation #define PROTO(T) \ template class slice_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class slice_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
the_stack
#pragma once

#include "PerWarpPatchGeometryStage.cuh"
#include "BinTileSpace.cuh"
#include "BinTileRasterizationStage.cuh"
#include "framebuffer.cuh"
#include "shader.cuh"

#include <meta_utils.h>
#include <ptx_primitives.cuh>

#include <cub/cub.cuh>

// Device-global count of thread blocks still producing geometry. The pipeline
// loops below decrement it (atomicSub) when their geometry stage drains and
// poll it (ldg_cg) to decide when all rasterization work can stop.
extern "C"
{
  __device__ int geometryProducingBlocksCount;
}

// Primary template, specialized below on ENABLE and DYNAMIC_RASTERIZERS.
template <bool ENABLE, unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS,
          unsigned int DYNAMIC_RASTERIZERS,
          class InputVertexAttributes, class PrimitiveType,
          class VertexShader, class CoverageShader, class FragmentShader,
          class BlendOp>
class Pipeline;

// Disabled pipeline: does nothing except warn once from thread (0,0).
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS,
          unsigned int DYNAMIC_RASTERIZERS,
          class InputVertexAttributes, class PrimitiveType,
          class VertexShader, class CoverageShader, class FragmentShader,
          class BlendOp>
class Pipeline<false, NUM_RASTERIZERS, NUM_WARPS, DYNAMIC_RASTERIZERS,
               InputVertexAttributes, PrimitiveType, VertexShader,
               CoverageShader, FragmentShader, BlendOp>
{
public:
  __device__
  static void run()
  {
    if (blockIdx.x == 0 && threadIdx.x == 0)
      printf("WARNING: pipeline disabled\n");
  }
};

// Static pipeline (DYNAMIC_RASTERIZERS == 0): every block owns a fixed
// rasterizer and alternates between its geometry stage and its
// rasterization stage until no block is producing geometry anymore.
template <unsigned int NUM_RASTERIZERS, unsigned int NUM_WARPS,
          class InputVertexAttributes, class PrimitiveType,
          class VertexShader, class CoverageShader, class FragmentShader,
          class BlendOp>
class Pipeline<true, NUM_RASTERIZERS, NUM_WARPS, 0, InputVertexAttributes,
               PrimitiveType, VertexShader, CoverageShader, FragmentShader,
               BlendOp>
{
  typedef PatternTileSpace<PATTERN_TECHNIQUE, NUM_RASTERIZERS, 8192, 8192, 8, 8, 8, 8> UsedBinTileSpace;
  typedef BinTileRasterizationStage<NUM_RASTERIZERS, NUM_WARPS, BINRASTER_EXCLUSIVE_TILE_ACCESS_MODE, ENFORCE_PRIMITIVE_ORDER, FORCE_QUAD_SHADING, UsedBinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> RasterizationStage;
  typedef PerWarpPatchCachedGeometryStage<NUM_WARPS, InputVertexAttributes, PrimitiveType, VertexShader, FragmentShader, RasterizationStage, CLIPPING> GeometryStage;

  // The two stages never run concurrently within a block, so their shared
  // memory is overlaid in a union.
  struct SharedState
  {
    union
    {
      __align__(16) char rasterization_stage_shared_memory[RasterizationStage::SHARED_MEMORY];
      __align__(16) char geometry_stage_shared_memory[GeometryStage::SHARED_MEMORY];
    };
  };

public:
  __device__
  static void run()
  {
    __shared__ SharedState shared_memory;
    Instrumentation::BlockObserver<0, 0> observer;
    __shared__ volatile int runstate[6];
    //runstate:
    // 0 geometry active
    // 1 rasterizer efficient
    // 2 rasterizer full
    // 3 require sortrun
    // 4 require rasterrun
    // 5 other geom running
    // NOTE(review): runstate[3] and runstate[4] are read by the loop
    // condition before any thread has written them — they are
    // uninitialized shared memory on the first iteration. runstate[5] is
    // true, so the loop is entered regardless; confirm this is intended.
    runstate[0] = true;
    runstate[1] = false;
    runstate[2] = false;
    runstate[5] = true;
    __syncthreads();
    while (runstate[3] || runstate[4] || runstate[5])
    {
      if (runstate[0] && !runstate[1])
      {
        RasterizationStage::writeCanNotReceiveAllNoSync(&runstate[2]);
        __syncthreads();
        if (!runstate[2])
        {
          // Geometry stage returns false once its input is exhausted.
          if (!GeometryStage::run(shared_memory.geometry_stage_shared_memory))
          {
            if (threadIdx.x == 0)
            {
              atomicSub(&geometryProducingBlocksCount, 1);
              runstate[0] = false;
            }
          }
        }
      }
      __syncthreads();
      runstate[3] = RasterizationStage::prepareRun(shared_memory.rasterization_stage_shared_memory, &runstate[1]);
      __syncthreads();
      if (!runstate[0] || runstate[1] || runstate[2])
      {
        runstate[4] = RasterizationStage::run(shared_memory.rasterization_stage_shared_memory);
        runstate[2] = false;
      }
      runstate[5] = ldg_cg(&geometryProducingBlocksCount) != 0;
      __syncthreads();
    }
  }
};

// Loads per-rasterizer fill counts for the block-wide sort in
// Pipeline::acquireRasterizer. PRESORT == false variant: one slot per
// checked rasterizer, no local pre-selection.
template<int PER_THREAD_CHECK, int PER_THREAD_SORT,
         unsigned int DYNAMIC_RASTERIZERS, unsigned int NUM_THREADS,
         bool PRESORT>
struct PipelineSortingElements
{
  static_assert(PER_THREAD_CHECK <= PER_THREAD_SORT, "when PRESORT is false, PER_THREAD_CHECK <= PER_THREAD_SORT must hold");
  static constexpr int SORTING_ELEMENTS = PER_THREAD_CHECK;

  // f(id) yields the fill count for virtual rasterizer `id`; out-of-range
  // slots are filled with count 0 / id -1.
  template<typename F>
  __device__
  static void loadin(unsigned int(&counts)[SORTING_ELEMENTS], int(&ids)[SORTING_ELEMENTS], F f)
  {
    #pragma unroll
    for (int i = 0; i < SORTING_ELEMENTS; i++)
    {
      int id = i * NUM_THREADS + threadIdx.x;
      if (id < DYNAMIC_RASTERIZERS)
      {
        counts[i] = f(id);
        ids[i] = id;
      }
      else
      {
        counts[i] = 0;
        ids[i] = -1;
      }
    }
  }
};

// PRESORT == true variant: checks PER_THREAD_CHECK rasterizers but keeps
// only the PER_THREAD_SORT largest via a per-thread insertion pass.
template<int PER_THREAD_CHECK, int PER_THREAD_SORT,
         unsigned int DYNAMIC_RASTERIZERS, unsigned int NUM_THREADS>
struct PipelineSortingElements<PER_THREAD_CHECK, PER_THREAD_SORT, DYNAMIC_RASTERIZERS, NUM_THREADS, true>
{
  static constexpr int SORTING_ELEMENTS = PER_THREAD_SORT;

  template<typename F>
  __device__
  static void loadin(unsigned int(&counts)[SORTING_ELEMENTS], int(&ids)[SORTING_ELEMENTS], F f)
  {
    #pragma unroll
    for (int i = 0; i < SORTING_ELEMENTS; i++)
      counts[i] = 0;
    #pragma unroll
    for (int i = 0; i < PER_THREAD_CHECK; i++)
    {
      int id = threadIdx.x + i*NUM_THREADS;
      unsigned int count = 0;
      if (id < DYNAMIC_RASTERIZERS)
        count = f(id);
      // Insert (count, id) into the descending top-K kept in registers.
      #pragma unroll
      for (int j = 0; j < SORTING_ELEMENTS; j++)
      {
        if (count > counts[j])
        {
          unsigned int tc = count;
          int ti = id;
          count = counts[j];
          counts[j] = tc;
          id = ids[j];
          ids[j] = ti;
        }
      }
    }
  }
};

// Block-shared handle to the virtual rasterizer this block currently owns.
class VirtualRasterizerId
{
  __device__
  static int& r()
  {
    __shared__ int currentRasterizer;
    return currentRasterizer;
  }
public:
  __device__
  static int rasterizer()
  {
    return r();
  }
  __device__
  static void switchRasterizer(int i)
  {
    r() = i;
  }
};

// Dynamic pipeline: blocks compete for DYNAMIC_RASTERIZERS virtual
// rasterizers, picking the fullest queue via a block-wide radix sort.
template <bool ENABLE, unsigned int NUM_BLOCKS, unsigned int NUM_WARPS,
          unsigned int DYNAMIC_RASTERIZERS,
          class InputVertexAttributes, class PrimitiveType,
          class VertexShader, class CoverageShader, class FragmentShader,
          class BlendOp>
class Pipeline
{
  static constexpr int NUM_THREADS = NUM_WARPS * WARP_SIZE;
  static constexpr int VIRTUAL_RASTERIZER_TO_BLOCK_RATIO = (DYNAMIC_RASTERIZERS + NUM_THREADS - 1) / NUM_THREADS;
  static constexpr int MAX_LOCAL_SORT = 2;
  typedef PipelineSortingElements<VIRTUAL_RASTERIZER_TO_BLOCK_RATIO, MAX_LOCAL_SORT, DYNAMIC_RASTERIZERS, NUM_THREADS, false> Sorter;
  static constexpr int SORTING_ELEMENTS = Sorter::SORTING_ELEMENTS;
  // Sort only the bits that a queue fill level can occupy.
  static constexpr int SORT_MAX_BITS = 33 - static_clz<RASTERIZER_QUEUE_SIZE>::value;

  typedef PatternTileSpace<PATTERN_TECHNIQUE, DYNAMIC_RASTERIZERS, 8192, 8192, 8, 8, 8, 8, VirtualRasterizerId> UsedBinTileSpace;
  typedef BinTileRasterizationStage<DYNAMIC_RASTERIZERS, NUM_WARPS, BINRASTER_EXCLUSIVE_TILE_ACCESS_MODE, ENFORCE_PRIMITIVE_ORDER, FORCE_QUAD_SHADING, UsedBinTileSpace, CoverageShader, FragmentShader, FrameBuffer, BlendOp> RasterizationStage;
  typedef PerWarpPatchCachedGeometryStage<NUM_WARPS, InputVertexAttributes, PrimitiveType, VertexShader, FragmentShader, RasterizationStage, CLIPPING> GeometryStage;

  typedef cub::BlockRadixSort<unsigned int, NUM_THREADS, SORTING_ELEMENTS, int> VirtualRasterSorting;

  struct SharedState
  {
    union
    {
      // NOTE(review): the +1000 padding looks like a debugging safety
      // margin — confirm whether it is still required.
      __align__(16) char rasterization_stage_shared_memory[RasterizationStage::SHARED_MEMORY+1000];
      __align__(16) char geometry_stage_shared_memory[GeometryStage::SHARED_MEMORY+1000];
      __align__(16) typename VirtualRasterSorting::TempStorage virtualraster_sorting_storage;
    };
  };

  // Sort all virtual rasterizers by queue fill level (descending) and try,
  // fullest first, to atomically claim one for this block. Returns true if
  // a rasterizer was acquired (rasterizer id published via
  // VirtualRasterizerId). With only_efficient, queues below the efficiency
  // threshold are skipped.
  __device__
  static bool acquireRasterizer(typename VirtualRasterSorting::TempStorage& virtualraster_sorting_storage, bool only_efficient)
  {
    unsigned int counts[SORTING_ELEMENTS];
    int ids[SORTING_ELEMENTS];
    Sorter::loadin(counts, ids, [&](int id)
    {
      if (!virtual_rasterizers.isRasterizerActive(id))
      {
        int res = max(0, RasterizationStage::fillLevelNoCheck(id));
        return res;
      }
      return 0;
    });

    //sort according to fill level
    VirtualRasterSorting(virtualraster_sorting_storage).SortDescending(counts, ids, 0, SORT_MAX_BITS);

    //try to acquire (TODO: add some kind of round robin?)
    for (int n = 0; n < NUM_THREADS; ++n)
    {
      bool found = false;
      if (threadIdx.x == n)
      {
        #pragma unroll
        for (int i = 0; i < SORTING_ELEMENTS; ++i)
        {
          unsigned int threshold = only_efficient ? (DYNAMIC_RASTERIZER_EFFICIENT_THRESHOLD ? DYNAMIC_RASTERIZER_EFFICIENT_THRESHOLD : NUM_THREADS) : 1;
          if (counts[i] >= threshold && ids[i] >= 0)
          {
            if (virtual_rasterizers.setRasterizerActive(ids[i]))
            {
              VirtualRasterizerId::switchRasterizer(ids[i]);
              found = true;
              break;
            }
          }
        }
      }
      if (__syncthreads_or(found))
        return true;
    }
    return false;
  }

  // Release the rasterizer this block currently owns.
  __device__
  static void freeMyRasterizer()
  {
    if (threadIdx.x == 0)
    {
      virtual_rasterizers.setRasterizerInactive(VirtualRasterizerId::rasterizer());
    }
  }

public:
  __device__
  static void run()
  {
    __shared__ SharedState shared_memory;
    Instrumentation::BlockObserver<0, 0> observer;
    __shared__ volatile int runstate[5];
    // NOTE(review): runstate[1] is read by the loop condition before it is
    // first written inside the loop — uninitialized on entry (runstate[0]
    // is true, so the loop is entered regardless); confirm intended.
    runstate[0] = true;
    runstate[3] = false;
    runstate[4] = false;
    __syncthreads();
    //runstate:
    // 0 geometry active
    // 1 rasterizer active
    // 2 rasterizer efficient
    // 3 can not run geometry
    // 4 rasterizer aquired
    while (runstate[0] || runstate[1])
    {
      if (runstate[0] && !runstate[4])
      {
        RasterizationStage::writeIterateCanNotReceiveAllNoSync(&runstate[3]);
        __syncthreads();
        if (!runstate[3])
        {
          if (!GeometryStage::run(shared_memory.geometry_stage_shared_memory))
          {
            if (threadIdx.x == 0)
            {
              atomicSub(&geometryProducingBlocksCount, 1);
              runstate[0] = false;
            }
          }
        }
      }
      __syncthreads();
      if (!runstate[4])
      {
        runstate[1] = ldg_cg(&geometryProducingBlocksCount) != 0;
        __syncthreads();
        runstate[4] = acquireRasterizer(shared_memory.virtualraster_sorting_storage, runstate[0] && !runstate[3]);
      }
      if (runstate[4])
      {
        runstate[1] = true;
        RasterizationStage::prepareRun(shared_memory.rasterization_stage_shared_memory, &runstate[2]);
        __syncthreads();
        if (!RasterizationStage::run(shared_memory.rasterization_stage_shared_memory))
        {
          runstate[4] = false;
          freeMyRasterizer();
        }
      }
      runstate[3] = false;
      __syncthreads();
    }
  }
};
// FIX(review): removed a stray `#endif // INCLUDED_CURE_PIPELINE` that had
// no matching `#if`/`#ifndef` — the file uses `#pragma once`, so the old
// include-guard terminator is a preprocessor error.
the_stack
#include "nvblox/integrators/esdf_integrator.h"

namespace nvblox {

// Destroy the lazily-created CUDA stream, if one was ever made.
EsdfIntegrator::~EsdfIntegrator() {
  if (cuda_stream_ != nullptr) {
    cudaStreamDestroy(cuda_stream_);
  }
}

// Integrate the given TSDF blocks into the ESDF layer on the GPU:
// allocate destinations, mark sites, clear invalidated voxels, then run
// parallel block banding over both the updated and the cleared blocks.
void EsdfIntegrator::integrateBlocksOnGPU(
    const TsdfLayer& tsdf_layer, const std::vector<Index3D>& block_indices,
    EsdfLayer* esdf_layer) {
  timing::Timer esdf_timer("esdf/integrate");

  if (block_indices.empty()) {
    return;
  }

  // First, check if the stream exists. If not, create one.
  if (cuda_stream_ == nullptr) {
    checkCudaErrors(cudaStreamCreate(&cuda_stream_));
  }

  timing::Timer allocate_timer("esdf/integrate/allocate");
  // First, allocate all the destination blocks.
  allocateBlocksOnCPU(block_indices, esdf_layer);
  allocate_timer.Stop();

  timing::Timer mark_timer("esdf/integrate/mark_sites");
  // Then, mark all the sites on GPU.
  // This finds all the blocks that are eligible to be parents.
  std::vector<Index3D> blocks_with_sites;
  std::vector<Index3D> blocks_to_clear;
  markAllSitesOnGPU(tsdf_layer, block_indices, esdf_layer, &blocks_with_sites,
                    &blocks_to_clear);
  mark_timer.Stop();

  std::vector<Index3D> cleared_blocks;
  if (!blocks_to_clear.empty()) {
    timing::Timer compute_timer("esdf/integrate/clear");
    clearInvalidOnGPU(blocks_to_clear, esdf_layer, &cleared_blocks);
    // FIX(review): removed a dead `std::vector<Index3D> all_clear_updated;`
    // local that was declared here and never used.
  }

  timing::Timer compute_timer("esdf/integrate/compute");
  // Parallel block banding on GPU.
  computeEsdfOnGPU(blocks_with_sites, esdf_layer);

  if (!cleared_blocks.empty()) {
    computeEsdfOnGPU(cleared_blocks, esdf_layer);
  }
  compute_timer.Stop();
}

// 2-D variant of integrateBlocksOnGPU: sites are taken from the TSDF band
// z in [z_min, z_max] and written into the single output slice at z_output.
void EsdfIntegrator::integrateSliceOnGPU(
    const TsdfLayer& tsdf_layer, const std::vector<Index3D>& block_indices,
    float z_min, float z_max, float z_output, EsdfLayer* esdf_layer) {
  timing::Timer esdf_timer("esdf/integrate_slice");

  if (block_indices.empty()) {
    return;
  }

  // First, check if the stream exists. If not, create one.
  if (cuda_stream_ == nullptr) {
    checkCudaErrors(cudaStreamCreate(&cuda_stream_));
  }

  timing::Timer allocate_timer("esdf/integrate_slice/allocate");
  // First, allocate all the destination blocks.
  allocateBlocksOnCPU(block_indices, esdf_layer);
  allocate_timer.Stop();

  timing::Timer mark_timer("esdf/integrate_slice/mark_sites");
  // Then, mark all the sites on GPU.
  // This finds all the blocks that are eligible to be parents.
  std::vector<Index3D> blocks_with_sites;
  std::vector<Index3D> blocks_to_clear;
  markSitesInSliceOnGPU(tsdf_layer, block_indices, z_min, z_max, z_output,
                        esdf_layer, &blocks_with_sites, &blocks_to_clear);
  mark_timer.Stop();

  std::vector<Index3D> cleared_blocks;
  if (!blocks_to_clear.empty()) {
    timing::Timer compute_timer("esdf/integrate_slice/clear");
    clearInvalidOnGPU(blocks_to_clear, esdf_layer, &cleared_blocks);
    // FIX(review): removed a dead `std::vector<Index3D> all_clear_updated;`
    // local that was declared here and never used.
  }

  timing::Timer compute_timer("esdf/integrate_slice/compute");
  // Parallel block banding on GPU.
  computeEsdfOnGPU(blocks_with_sites, esdf_layer);

  if (!cleared_blocks.empty()) {
    computeEsdfOnGPU(cleared_blocks, esdf_layer);
  }
  compute_timer.Stop();
}

// Reset a voxel to "no parent, maximum distance".
__device__ void clearVoxelDevice(EsdfVoxel* voxel,
                                 float max_squared_distance_vox) {
  voxel->parent_direction.setZero();
  voxel->squared_distance_vox = max_squared_distance_vox;
}

// Marks ESDF sites from the TSDF: a voxel becomes a site when it is inside
// (distance <= 0) and within min_site_distance_m of the surface. Sets
// updated[b] when block b gained/kept sites and to_clear[b] when voxels in
// block b were invalidated and need re-propagation.
// Block size MUST be voxels_per_side x voxels_per_side x voxels_per_side.
// Grid size can be anything.
__global__ void markAllSitesKernel(int num_blocks,
                                   const TsdfBlock** tsdf_blocks,
                                   EsdfBlock** esdf_blocks,
                                   float min_site_distance_m, float min_weight,
                                   float max_squared_distance_vox,
                                   bool* updated, bool* to_clear) {
  dim3 voxel_index = threadIdx;

  // This for loop allows us to have fewer threadblocks than there are
  // blocks in this computation. We assume the threadblock size is constant
  // though to make our lives easier.
  for (int block_index = blockIdx.x; block_index < num_blocks;
       block_index += gridDim.x) {
    // Get the correct voxel for this index.
    const TsdfVoxel* tsdf_voxel =
        &tsdf_blocks[block_index]
             ->voxels[voxel_index.x][voxel_index.y][voxel_index.z];
    EsdfVoxel* esdf_voxel =
        &esdf_blocks[block_index]
             ->voxels[voxel_index.x][voxel_index.y][voxel_index.z];
    if (tsdf_voxel->weight >= min_weight) {
      // Mark as inside if the voxel distance is negative.
      bool is_inside = tsdf_voxel->distance <= 0.0f;
      if (esdf_voxel->is_inside && is_inside == false) {
        // Sidedness flipped: the old distance/parent is invalid.
        clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
        to_clear[block_index] = true;
      }
      esdf_voxel->is_inside = is_inside;
      if (is_inside && fabsf(tsdf_voxel->distance) <= min_site_distance_m) {
        esdf_voxel->is_site = true;
        esdf_voxel->squared_distance_vox = 0.0f;
        esdf_voxel->parent_direction.setZero();
        updated[block_index] = true;
      } else {
        if (esdf_voxel->is_site) {
          esdf_voxel->is_site = false;
          // This voxel needs to be cleared.
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
          to_clear[block_index] = true;
        } else if (!esdf_voxel->observed) {
          // This is a brand new voxel.
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
        } else if (esdf_voxel->squared_distance_vox <= 1e-4) {
          // This is an invalid voxel that should be cleared.
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
          to_clear[block_index] = true;
        }
      }
      esdf_voxel->observed = true;
    }
  }
}

// Atomic min for float, split by sign so the integer comparison orders
// correctly. From:
// https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda
__device__ __forceinline__ float atomicMinFloat(float* addr, float value) {
  float old;
  old = (value >= 0)
            ? __int_as_float(atomicMin((int*)addr, __float_as_int(value)))
            : __uint_as_float(
                  atomicMax((unsigned int*)addr, __float_as_uint(value)));

  return old;
}

/// Thread size MUST be 8x8x8, block size can be anything.
// Collapse a z-band of TSDF voxels into a single ESDF output slice: each
// (x, y) column takes the minimum TSDF distance over the band, and layer 0
// of the thread block decides site/inside/clear status for the output voxel
// at z = output_voxel_index.
// Each threadIdx.z "layer" handles one stacked input block; layer 0 and the
// last layer are trimmed to [input_min_voxel_index, input_max_voxel_index).
__global__ void markSitesInSliceKernel(
    int num_input_blocks, int num_output_blocks, const TsdfBlock** tsdf_blocks,
    EsdfBlock** esdf_blocks, int output_voxel_index, int input_min_voxel_index,
    int input_max_voxel_index, float min_site_distance_m, float min_weight,
    float max_squared_distance_vox, bool* updated, bool* cleared) {
  // NOTE(review): num_input_blocks is unused in the kernel body — confirm
  // whether it can be dropped from the signature at the call sites.
  dim3 voxel_index = threadIdx;
  voxel_index.z = output_voxel_index;
  int layer_index = threadIdx.z;
  int num_layers = blockDim.z;

  constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;

  // Per-column reduction state shared across the z-layers of the block.
  // NOTE(review): new_values appears to be unused (see the commented-out
  // pointer below) — candidate for removal.
  __shared__ EsdfVoxel new_values[kVoxelsPerSide][kVoxelsPerSide];
  __shared__ bool observed[kVoxelsPerSide][kVoxelsPerSide];
  __shared__ float min_distance[kVoxelsPerSide][kVoxelsPerSide];

  // Initialize these.
  // NOTE(review): observed/min_distance are initialized once, before the
  // grid-stride loop below; when gridDim.x < num_output_blocks a block
  // iterates more than once and reuses stale values from the previous
  // iteration — verify launch config always satisfies
  // gridDim.x >= num_output_blocks, or re-initialize per iteration.
  if (layer_index == 0) {
    observed[voxel_index.x][voxel_index.y] = false;
    min_distance[voxel_index.x][voxel_index.y] = 100.0f;
  }
  __syncthreads();

  // This for loop allows us to have fewer threadblocks than there are
  // blocks in this computation. We assume the threadblock size is constant
  // though to make our lives easier.
  for (int block_index = blockIdx.x; block_index < num_output_blocks;
       block_index += gridDim.x) {
    // Get the correct block for this.
    const TsdfBlock* tsdf_block =
        tsdf_blocks[block_index + num_output_blocks * layer_index];
    // There's also null pointers in there.
    if (tsdf_block != nullptr) {
      // Iterate over all of the voxels in this block.
      int start_index = 0;
      int end_index = kVoxelsPerSide;
      if (layer_index == 0) {
        start_index = input_min_voxel_index;
      }
      if (layer_index == num_layers - 1) {
        end_index = input_max_voxel_index;
      }
      for (int i = start_index; i < end_index; i++) {
        const TsdfVoxel* tsdf_voxel =
            &tsdf_block->voxels[voxel_index.x][voxel_index.y][i];
        // EsdfVoxel* new_voxel = &new_values[voxel_index.x][voxel_index.y];
        // Get the correct voxel for this index.
        if (tsdf_voxel->weight >= min_weight) {
          observed[voxel_index.x][voxel_index.y] = true;
          // Column-wise min over the band; concurrent layers race, hence
          // the atomic.
          atomicMinFloat(&min_distance[voxel_index.x][voxel_index.y],
                         tsdf_voxel->distance);
        }
      }
    }

    // sync threads across everyone trying to update this voxel
    __syncthreads();

    // Ok now only if we're layer 0 do we compare the new and old values and
    // decide what to output.
    if (layer_index == 0) {
      EsdfVoxel* esdf_voxel =
          &esdf_blocks[block_index]
               ->voxels[voxel_index.x][voxel_index.y][voxel_index.z];
      // Case 0: Just skip it if it's unobserved. We don't care.
      if (!observed[voxel_index.x][voxel_index.y]) {
        continue;
      }
      // Determine if the new value puts us inside or in a site.
      bool is_inside = min_distance[voxel_index.x][voxel_index.y] <= 0.0f;
      bool is_site = fabsf(min_distance[voxel_index.x][voxel_index.y]) <=
                         min_site_distance_m &&
                     is_inside;

      // First handle the case where the voxel is a site.
      if (is_site) {
        if (esdf_voxel->is_site) {
          // Ok whatever. Add to the site list.
          // Its existing values are fine.
          updated[block_index] = true;
        } else {
          // Wasn't a site before, is now.
          esdf_voxel->observed = true;
          esdf_voxel->is_site = true;
          clearVoxelDevice(esdf_voxel, 0.0f);
          updated[block_index] = true;
        }
      } else {
        // Here we have to double-check what's going on.
        // If it was a site before, and isn't anymore, we have to clear it.
        if (esdf_voxel->is_site) {
          esdf_voxel->is_site = false;
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
          cleared[block_index] = true;
        }
        // Otherwise just leave it alone unless it's brand new.
        if (!esdf_voxel->observed) {
          esdf_voxel->observed = true;
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
        } else if (esdf_voxel->is_inside != is_inside) {
          // In case the sidedness swapped, clear the voxel.
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
          cleared[block_index] = true;
        } else if (esdf_voxel->squared_distance_vox <= 0.0f) {
          // This is somehow invalidly marked as a site despite the fact
          // it shouldn't be.
          clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
          cleared[block_index] = true;
        }
      }
      // Make the sidedness match.
      esdf_voxel->is_inside = is_inside;
    }
  }
}

// One-dimensional two-pass (forward then backward) distance sweep along
// sweep_axis within a single ESDF block. voxel_index carries the fixed
// coordinates of the band; its sweep_axis component is overwritten here.
__device__ void sweepSingleBand(Index3D voxel_index, int sweep_axis,
                                float max_squared_distance_vox,
                                EsdfBlock* esdf_block) {
  constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;
  Index3D last_site;
  bool site_found;
  // Sweep sweep sweep.
  // First we sweep forward, then backwards.
  for (int i = 0; i < 2; i++) {
    last_site = Index3D::Zero();
    site_found = false;
    int direction = 1;
    int start_voxel = 0;
    int end_voxel = kVoxelsPerSide;
    if (i == 1) {
      direction = -1;
      start_voxel = kVoxelsPerSide - 1;
      end_voxel = -1;
    }

    for (voxel_index(sweep_axis) = start_voxel;
         voxel_index(sweep_axis) != end_voxel;
         voxel_index(sweep_axis) += direction) {
      EsdfVoxel* esdf_voxel =
          &esdf_block
               ->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()];
      if (!esdf_voxel->observed) {
        continue;
      }
      // If this voxel is itself a site, then mark this for future voxels.
      if (esdf_voxel->is_site) {
        last_site = voxel_index;
        site_found = true;
      } else if (!site_found) {
        // If this voxel isn't a site but we haven't found a site yet,
        // then if this voxel is valid we set it as the site.
        if (esdf_voxel->squared_distance_vox < max_squared_distance_vox) {
          site_found = true;
          last_site = esdf_voxel->parent_direction + voxel_index;
        }
      } else {
        // If we've found the site, then should just decide what to do
        // here.
        Index3D potential_direction = last_site - voxel_index;
        float potential_distance = potential_direction.squaredNorm();
        // Either it hasn't been set at all or it's closer to the site
        // than to its current value.
        if (esdf_voxel->squared_distance_vox > potential_distance) {
          esdf_voxel->parent_direction = potential_direction;
          esdf_voxel->squared_distance_vox = potential_distance;
        } else if (esdf_voxel->squared_distance_vox <
                   max_squared_distance_vox) {
          // If the current value is a better site, then set it as a site.
          last_site = esdf_voxel->parent_direction + voxel_index;
        }
      }
    }
  }
}

// Propagate one border voxel's distance into the adjacent voxel of the
// neighboring block along `axis` (direction +/-1). Returns true when the
// neighbor voxel was improved (its block then needs another sweep).
__device__ bool updateSingleNeighbor(const EsdfBlock* esdf_block,
                                     const Index3D& voxel_index,
                                     const Index3D& neighbor_voxel_index,
                                     int axis, int direction,
                                     float max_squared_distance_vox,
                                     EsdfBlock* neighbor_block) {
  const EsdfVoxel* esdf_voxel =
      &esdf_block->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()];
  EsdfVoxel* neighbor_voxel =
      &neighbor_block
           ->voxels[neighbor_voxel_index.x()][neighbor_voxel_index.y()]
                   [neighbor_voxel_index.z()];
  if (!esdf_voxel->observed || !neighbor_voxel->observed ||
      neighbor_voxel->is_site ||
      esdf_voxel->squared_distance_vox >= max_squared_distance_vox) {
    return false;
  }
  // Determine if we can update this.
  Eigen::Vector3i potential_direction = esdf_voxel->parent_direction;
  potential_direction(axis) -= direction;
  float potential_distance = potential_direction.squaredNorm();
  // TODO: might be some concurrency issues here, have to be a bit careful
  // on the corners/edges.
  if (neighbor_voxel->squared_distance_vox > potential_distance) {
    neighbor_voxel->parent_direction = potential_direction;
    neighbor_voxel->squared_distance_vox = potential_distance;
    return true;
  }
  return false;
}

// Invalidate the adjacent neighbor-block voxel when its parent chain points
// back through a border voxel that has itself been invalidated. Returns
// true when the neighbor voxel was cleared.
__device__ bool clearSingleNeighbor(const EsdfBlock* esdf_block,
                                    const Index3D& voxel_index,
                                    const Index3D& neighbor_voxel_index,
                                    int axis, int direction,
                                    float max_squared_distance_vox,
                                    EsdfBlock* neighbor_block) {
  const EsdfVoxel* esdf_voxel =
      &esdf_block->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()];
  EsdfVoxel* neighbor_voxel =
      &neighbor_block
           ->voxels[neighbor_voxel_index.x()][neighbor_voxel_index.y()]
                   [neighbor_voxel_index.z()];

  if (esdf_voxel->squared_distance_vox < max_squared_distance_vox ||
      !esdf_voxel->observed || neighbor_voxel->is_site ||
      neighbor_voxel->squared_distance_vox >= max_squared_distance_vox) {
    return false;
  }
  // Determine if we can update this.
  // Only clear when the neighbor's parent points back toward this block.
  Index3D parent_voxel_dir = neighbor_voxel->parent_direction;
  if ((direction > 0 && parent_voxel_dir(axis) > 0) ||
      (direction < 0 && parent_voxel_dir(axis) < 0)) {
    return false;
  }

  clearVoxelDevice(neighbor_voxel, max_squared_distance_vox);
  return true;
}

/// Thread size MUST be 8x8xN (where N is a number of blocks up to 8), block
/// size can be anything.
__global__ void sweepBlockBandKernel(int num_blocks, EsdfBlock** esdf_blocks,
                                     float max_squared_distance_vox) {
  // We go one axis at a time, syncing threads in between.
  dim3 thread_index = threadIdx;
  thread_index.z = 0;

  // NOTE(review): the __syncthreads() calls below sit inside a loop whose
  // trip count depends on threadIdx.z; if num_blocks is not covered evenly,
  // different z-layers can execute different numbers of iterations — verify
  // the launch configuration rules this out.
  for (int block_index = blockIdx.x * blockDim.z + threadIdx.z;
       block_index < num_blocks; block_index += gridDim.x * blockDim.z) {
    // For simplicity we have to have the same number of blocks in the CUDA
    // kernel call as we have actual blocks.
    EsdfBlock* esdf_block = esdf_blocks[block_index];
    Index3D voxel_index(0, thread_index.x, thread_index.y);

    // X axis done.
    sweepSingleBand(voxel_index, 0, max_squared_distance_vox, esdf_block);
    __syncthreads();

    // Y axis done.
    voxel_index << thread_index.x, 0, thread_index.y;
    sweepSingleBand(voxel_index, 1, max_squared_distance_vox, esdf_block);
    __syncthreads();

    // Z axis done.
    voxel_index << thread_index.x, thread_index.y, 0;
    sweepSingleBand(voxel_index, 2, max_squared_distance_vox, esdf_block);
    __syncthreads();
  }
}

/// Thread size MUST be 8x8xN, where N is the number of blocks processed at
/// a time, block size can be anything.
// Propagates distances across one face (selected by `i` in [0, 6): axis =
// i/2, direction from i%2) from each block into its neighbor, flagging
// neighbors that changed in updated_neighbors.
__global__ void updateLocalNeighborBandsKernel(
    int num_blocks, int i, EsdfBlock** esdf_blocks, int* neighbor_table,
    EsdfBlock** neighbor_pointers, float max_squared_distance_vox,
    bool* updated_neighbors) {
  // We go one axis at a time, syncing threads in between.
  dim3 thread_index = threadIdx;
  thread_index.z = 0;

  constexpr int kNumNeighbors = 6;
  constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;

  for (int block_index = blockIdx.x * blockDim.z + threadIdx.z;
       block_index < num_blocks; block_index += gridDim.x * blockDim.z) {
    EsdfBlock* esdf_block = esdf_blocks[block_index];
    Index3D voxel_index;
    Index3D neighbor_voxel_index;
    // Each thread updates 1 neighbors, set by "i".
    // Get the neighbor block.
    int neighbor_index = neighbor_table[block_index * kNumNeighbors + i];
    if (neighbor_index < 0) {
      continue;
    }
    EsdfBlock* neighbor_block = neighbor_pointers[neighbor_index];
    // Now we have the neighbor block... Let's figure out which voxels we
    // should look at.
    int axis = i / 2;
    int direction = i % 2 ? -1 : 1;

    // Fill in the axes.
    if (axis == 0) {
      voxel_index << 0, thread_index.x, thread_index.y;
    } else if (axis == 1) {
      voxel_index << thread_index.x, 0, thread_index.y;
    } else if (axis == 2) {
      voxel_index << thread_index.x, thread_index.y, 0;
    }
    neighbor_voxel_index = voxel_index;
    // If we're looking backwards...
    // Pair up the touching faces of the two blocks.
    if (direction < 0) {
      voxel_index(axis) = 0;
      neighbor_voxel_index(axis) = kVoxelsPerSide - 1;
    } else {
      voxel_index(axis) = kVoxelsPerSide - 1;
      neighbor_voxel_index(axis) = 0;
    }

    bool updated = updateSingleNeighbor(
        esdf_block, voxel_index, neighbor_voxel_index, axis, direction,
        max_squared_distance_vox, neighbor_block);
    if (updated) {
      updated_neighbors[neighbor_index] = true;
    }
  }
}

/// Thread size MUST be 8x8x8, block size can be anything.
// Clears any voxel whose parent lies in the same block but is no longer a
// site (parents outside the block are skipped here).
__global__ void clearWithinBlockKernel(int num_blocks, EsdfBlock** esdf_blocks,
                                       float max_squared_distance_vox) {
  dim3 voxel_index = threadIdx;
  constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;

  // Allow block size to be whatever.
  for (int block_index = blockIdx.x; block_index < num_blocks;
       block_index += gridDim.x) {
    // Get the voxel.
    EsdfVoxel* esdf_voxel =
        &esdf_blocks[block_index]
             ->voxels[voxel_index.x][voxel_index.y][voxel_index.z];
    // Check if its parent is in the same block.
    if (!esdf_voxel->observed || esdf_voxel->is_site ||
        esdf_voxel->squared_distance_vox >= max_squared_distance_vox) {
      continue;
    }
    // Get the parent.
    Index3D parent_index =
        Index3D(voxel_index.x, voxel_index.y, voxel_index.z) +
        esdf_voxel->parent_direction;

    // Check if the voxel is within the same block.
    if (parent_index.x() < 0 || parent_index.x() >= kVoxelsPerSide ||
        parent_index.y() < 0 || parent_index.y() >= kVoxelsPerSide ||
        parent_index.z() < 0 || parent_index.z() >= kVoxelsPerSide) {
      continue;
    }

    // Ok check if the parent index is a site.
    if (!esdf_blocks[block_index]
             ->voxels[parent_index.x()][parent_index.y()][parent_index.z()]
             .is_site) {
      clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
    }
  }
}

/// Thread size MUST be 8x8x8, block size can be anything.
// NOTE(review): this kernel is truncated at the end of the visible chunk;
// its remainder continues in the next section of the file.
__global__ void clearInternalVoxelsKernel(int num_blocks,
                                          EsdfBlock** esdf_blocks,
                                          float max_squared_distance_vox) {
  dim3 voxel_index = threadIdx;
  constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide;

  // Allow block size to be whatever.
  for (int block_index = blockIdx.x; block_index < num_blocks;
       block_index += gridDim.x) {
    // Get the voxel.
    EsdfVoxel* esdf_voxel =
        &esdf_blocks[block_index]
             ->voxels[voxel_index.x][voxel_index.y][voxel_index.z];
    if (!esdf_voxel->observed || esdf_voxel->is_site ||
        esdf_voxel->squared_distance_vox >= max_squared_distance_vox) {
      continue;
    }
    // Get the parent.
    Index3D parent_index =
        Index3D(voxel_index.x, voxel_index.y, voxel_index.z) +
        esdf_voxel->parent_direction;

    // Check if we're our own parent. This is definitely wrong since we're not
    // a site.
    if (esdf_voxel->parent_direction.isZero()) {
      clearVoxelDevice(esdf_voxel, max_squared_distance_vox);
      continue;
    }

    // Get the closest index to the parent within the same block.
    // We just get the nearest neighbor.
Index3D closest_parent(min(max(parent_index.x(), 0), kVoxelsPerSide - 1), min(max(parent_index.y(), 0), kVoxelsPerSide - 1), min(max(parent_index.z(), 0), kVoxelsPerSide - 1)); // Ok check if the parent index is a site. // TODO: Check if we need the observed rule or not... const EsdfVoxel& neighbor_voxel = esdf_blocks[block_index]->voxels[closest_parent.x()][closest_parent.y()] [closest_parent.z()]; if (!neighbor_voxel.observed || neighbor_voxel.squared_distance_vox >= max_squared_distance_vox) { clearVoxelDevice(esdf_voxel, max_squared_distance_vox); } } } /// Thread size MUST be 8x8xN, where N is the number of blocks processed at /// a time, block size can be anything. __global__ void clearLocalNeighborBandsKernel(int num_blocks, int i, EsdfBlock** esdf_blocks, int* neighbor_table, EsdfBlock** neighbor_pointers, float max_squared_distance_vox, bool* updated_neighbors) { // We go one axis at a time, syncing threads in between. dim3 thread_index = threadIdx; thread_index.z = 0; constexpr int kNumNeighbors = 6; constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; for (int block_index = blockIdx.x * blockDim.z + threadIdx.z; block_index < num_blocks; block_index += gridDim.x * blockDim.z) { EsdfBlock* esdf_block = esdf_blocks[block_index]; Index3D voxel_index; Index3D neighbor_voxel_index; // Each thread updates 1 neighbors, set by "i". // Get the neighbor block. int neighbor_index = neighbor_table[block_index * kNumNeighbors + i]; if (neighbor_index < 0) { continue; } EsdfBlock* neighbor_block = neighbor_pointers[neighbor_index]; // Now we have the neighbor block... Let's figure out which voxels we // should look at. int axis = i / 2; int direction = i % 2 ? -1 : 1; // Fill in the axes. 
if (axis == 0) { voxel_index << 0, thread_index.x, thread_index.y; } else if (axis == 1) { voxel_index << thread_index.x, 0, thread_index.y; } else if (axis == 2) { voxel_index << thread_index.x, thread_index.y, 0; } neighbor_voxel_index = voxel_index; // If we're looking backwards... if (direction < 0) { voxel_index(axis) = 0; neighbor_voxel_index(axis) = kVoxelsPerSide - 1; } else { voxel_index(axis) = kVoxelsPerSide - 1; neighbor_voxel_index(axis) = 0; } bool updated = clearSingleNeighbor( esdf_block, voxel_index, neighbor_voxel_index, axis, direction, max_squared_distance_vox, neighbor_block); if (updated) { updated_neighbors[neighbor_index] = true; } } } void EsdfIntegrator::markAllSitesOnGPU( const TsdfLayer& tsdf_layer, const std::vector<Index3D>& block_indices, EsdfLayer* esdf_layer, std::vector<Index3D>* blocks_with_sites, std::vector<Index3D>* cleared_blocks) { CHECK_NOTNULL(esdf_layer); CHECK_NOTNULL(blocks_with_sites); // Caching. const float voxel_size = tsdf_layer.voxel_size(); const float max_distance_vox = max_distance_m_ / voxel_size; const float max_squared_distance_vox = max_distance_vox * max_distance_vox; // Cache the minimum distance in metric size. const float min_site_distance_m = min_site_distance_vox_ * voxel_size; int num_blocks = block_indices.size(); // Get all of the block pointers we need. tsdf_pointers_host_.resize(num_blocks); block_pointers_host_.resize(num_blocks); // Have an updated output variable as well. updated_blocks_device_.resize(num_blocks); updated_blocks_device_.setZero(); cleared_blocks_device_.resize(num_blocks); cleared_blocks_device_.setZero(); // Populate all the input vectors. 
for (size_t i = 0; i < num_blocks; i++) { const Index3D& block_index = block_indices[i]; EsdfBlock::Ptr esdf_block = esdf_layer->getBlockAtIndex(block_index); TsdfBlock::ConstPtr tsdf_block = tsdf_layer.getBlockAtIndex(block_index); if (!esdf_block || !tsdf_block) { LOG(ERROR) << "Somehow trying to update non-existent blocks!"; continue; } tsdf_pointers_host_[i] = tsdf_block.get(); block_pointers_host_[i] = esdf_block.get(); } // Copy what we need over to the device. tsdf_pointers_device_ = tsdf_pointers_host_; block_pointers_device_ = block_pointers_host_; // Call the kernel. int dim_block = num_blocks; constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); markAllSitesKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( num_blocks, tsdf_pointers_device_.data(), block_pointers_device_.data(), min_site_distance_m, min_weight_, max_squared_distance_vox, updated_blocks_device_.data(), cleared_blocks_device_.data()); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); // Copy out. updated_blocks_host_ = updated_blocks_device_; cleared_blocks_host_ = cleared_blocks_device_; // Get the output vector. // TODO(helen): swap this to a kernel operation. for (size_t i = 0; i < num_blocks; i++) { if (updated_blocks_host_[i]) { blocks_with_sites->push_back(block_indices[i]); } if (cleared_blocks_host_[i]) { cleared_blocks->push_back(block_indices[i]); } } } // 2D slice version of the markAllSites function above. void EsdfIntegrator::markSitesInSliceOnGPU( const TsdfLayer& tsdf_layer, const std::vector<Index3D>& block_indices, float min_z, float max_z, float output_z, EsdfLayer* esdf_layer, std::vector<Index3D>* updated_blocks, std::vector<Index3D>* cleared_blocks) { CHECK_NOTNULL(esdf_layer); CHECK_NOTNULL(updated_blocks); CHECK_NOTNULL(cleared_blocks); // Caching. 
const float voxel_size = tsdf_layer.voxel_size(); const float max_distance_vox = max_distance_m_ / voxel_size; const float max_squared_distance_vox = max_distance_vox * max_distance_vox; // Cache the minimum distance in metric size. const float min_site_distance_m = min_site_distance_vox_ * voxel_size; // We are going to subsample the block_indices. // We need to figure out all the output blocks, which will be a subset // of the input blocks. At the same time we need to get all of the stacks // of input blocks at all levels. // We are going to pull some "clever" stuff: the input block list will be // of length N * n_input_blocks, where "N" is the number of vertical // layers there could be that fall into the min z to max z range. // Ok first figure out how many layers we could have. Index3D min_block_index; Index3D min_voxel_index; getBlockAndVoxelIndexFromPositionInLayer(tsdf_layer.block_size(), Vector3f(0.0f, 0.0f, min_z), &min_block_index, &min_voxel_index); const int min_block_index_z = min_block_index.z(); const int min_voxel_index_z = min_voxel_index.z(); Index3D max_block_index; Index3D max_voxel_index; getBlockAndVoxelIndexFromPositionInLayer(tsdf_layer.block_size(), Vector3f(0.0f, 0.0f, max_z), &max_block_index, &max_voxel_index); const int max_block_index_z = max_block_index.z(); const int max_voxel_index_z = max_voxel_index.z(); // There is always at least 1 layer. int num_vertical_layers = max_block_index_z - min_block_index_z + 1; // And figure out what the index of the output voxel is. // std::pair<Index3D, Index3D> output_block_and_voxel_index Index3D output_block_index; Index3D output_voxel_index; getBlockAndVoxelIndexFromPositionInLayer( tsdf_layer.block_size(), Vector3f(0.0f, 0.0f, output_z), &output_block_index, &output_voxel_index); const int output_block_index_z = output_block_index.z(); const int output_voxel_index_z = output_voxel_index.z(); // Next get a list of all the valid input blocks. 
Index3DSet output_block_set; for (const Index3D& block_index : block_indices) { if (block_index.z() >= min_block_index_z && block_index.z() <= max_block_index_z) { output_block_set.insert( Index3D(block_index.x(), block_index.y(), output_block_index_z)); } } // Ok now we have all the indices we actually need. // Just have to get their pointers and we're good. size_t num_blocks = output_block_set.size(); if (num_blocks == 0) { return; } std::vector<Index3D> input_blocks(num_blocks * num_vertical_layers); std::vector<Index3D> output_blocks(num_blocks); tsdf_pointers_host_.resize(num_blocks * num_vertical_layers); tsdf_pointers_host_.setZero(); block_pointers_host_.resize(num_blocks); size_t i = 0; for (const Index3D& block_index : output_block_set) { // This is for the output block, which we allocate along the way. output_blocks[i] = block_index; block_pointers_host_[i] = esdf_layer->allocateBlockAtIndex(block_index).get(); // Go through all the relevant input pointers: Index3D input_block_index = block_index; int j = 0; for (input_block_index.z() = min_block_index_z; input_block_index.z() <= max_block_index_z; input_block_index.z()++) { input_blocks[i + num_blocks * j] = input_block_index; // This can be null. It's fine. tsdf_pointers_host_[i + num_blocks * j] = tsdf_layer.getBlockAtIndex(input_block_index).get(); j++; } i++; } // Copy what we need over to the device. tsdf_pointers_device_ = tsdf_pointers_host_; block_pointers_device_ = block_pointers_host_; // Finally, set up the updated and cleared vectors. updated_blocks_device_.resize(num_blocks); updated_blocks_device_.setZero(); cleared_blocks_device_.resize(num_blocks); cleared_blocks_device_.setZero(); // Call the kernel! 
int dim_block = num_blocks; constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, num_vertical_layers); markSitesInSliceKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( num_blocks, num_blocks, tsdf_pointers_device_.data(), block_pointers_device_.data(), output_voxel_index_z, min_voxel_index_z, max_voxel_index_z, min_site_distance_m, min_weight_, max_squared_distance_vox, updated_blocks_device_.data(), cleared_blocks_device_.data()); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); // Copy out. updated_blocks_host_ = updated_blocks_device_; cleared_blocks_host_ = cleared_blocks_device_; // Pack the outputs. The rest of the functions should work as before. for (size_t i = 0; i < output_blocks.size(); i++) { if (updated_blocks_host_[i]) { updated_blocks->push_back(output_blocks[i]); } if (cleared_blocks_host_[i]) { cleared_blocks->push_back(output_blocks[i]); } } } void EsdfIntegrator::clearInvalidOnGPU( const std::vector<Index3D>& blocks_to_clear, EsdfLayer* esdf_layer, std::vector<Index3D>* updated_blocks) { CHECK_NOTNULL(esdf_layer); CHECK_NOTNULL(updated_blocks); // Caching. const float voxel_size = esdf_layer->voxel_size(); const float max_distance_vox = max_distance_m_ / voxel_size; const float max_squared_distance_vox = max_distance_vox * max_distance_vox; int num_blocks = blocks_to_clear.size(); block_pointers_host_.resize(num_blocks); // Have an updated output variable as well. updated_blocks_device_.resize(num_blocks); updated_blocks_device_.setZero(); // Populate all the input vectors. for (size_t i = 0; i < num_blocks; i++) { const Index3D& block_index = blocks_to_clear[i]; block_pointers_host_[i] = esdf_layer->getBlockAtIndex(block_index).get(); } block_pointers_device_ = block_pointers_host_; // Alright now run a kernel to clear all the voxels within a block. 
constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); clearWithinBlockKernel<<<num_blocks, dim_threads, 0, cuda_stream_>>>( num_blocks, block_pointers_device_.data(), max_squared_distance_vox); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); // Then clear all the neighbors. Index3DSet all_cleared_blocks; std::copy(blocks_to_clear.begin(), blocks_to_clear.end(), std::inserter(all_cleared_blocks, all_cleared_blocks.end())); std::vector<Index3D> clear_list = blocks_to_clear; std::vector<Index3D> new_clear_list; VLOG(3) << "Blocks to clear: " << blocks_to_clear.size(); while (!clear_list.empty()) { clearBlockNeighbors(clear_list, esdf_layer, &new_clear_list); std::copy(new_clear_list.begin(), new_clear_list.end(), std::inserter(all_cleared_blocks, all_cleared_blocks.end())); std::swap(clear_list, new_clear_list); new_clear_list.clear(); VLOG(3) << "Clear list size: " << clear_list.size(); } for (const Index3D& index : all_cleared_blocks) { updated_blocks->push_back(index); } } void EsdfIntegrator::clearBlockNeighbors(std::vector<Index3D>& clear_list, EsdfLayer* esdf_layer, std::vector<Index3D>* new_clear_list) { int num_blocks = clear_list.size(); if (num_blocks == 0) { return; } constexpr int kNumNeighbors = 6; const float voxel_size = esdf_layer->voxel_size(); const float max_distance_vox = max_distance_m_ / voxel_size; const float max_squared_distance_vox = max_distance_vox * max_distance_vox; constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; dim3 dim_threads_per_voxel(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); // Step 0: block pointers. 
block_pointers_host_.resize(num_blocks); for (size_t i = 0; i < num_blocks; i++) { const Index3D& block_index = clear_list[i]; block_pointers_host_[i] = esdf_layer->getBlockAtIndex(block_index).get(); } block_pointers_device_ = block_pointers_host_; // Step 0a: fix up the blocks so their neighbors are valid. clearInternalVoxelsKernel<<<num_blocks, dim_threads_per_voxel, 0, cuda_stream_>>>( num_blocks, block_pointers_device_.data(), max_squared_distance_vox); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); // Step one: set up the neighbor table. std::vector<Index3D> neighbor_indices; neighbor_table_host_.resize(num_blocks * kNumNeighbors); neighbor_table_host_.setZero(); neighbor_pointers_host_.resize(0); createNeighborTable(clear_list, esdf_layer, &neighbor_indices, &neighbor_pointers_host_, &neighbor_table_host_); // Step two: run the neighbor updating kernel. updated_blocks_device_.resize(neighbor_indices.size()); updated_blocks_device_.setZero(); neighbor_pointers_device_ = neighbor_pointers_host_; neighbor_table_device_ = neighbor_table_host_; constexpr int kNumBlocksPerCudaBlock = 8; int dim_block = std::max( static_cast<int>( std::ceil(num_blocks / static_cast<float>(kNumBlocksPerCudaBlock))), 1); dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); // We have to do the neighbors one at a time basically for concurrency // issues. // No clue if the concurrency issues hold for the clearing operation. // But this is easier to copy-and-paste. for (int i = 0; i < kNumNeighbors; i++) { clearLocalNeighborBandsKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( num_blocks, i, block_pointers_device_.data(), neighbor_table_device_.data(), neighbor_pointers_device_.data(), max_squared_distance_vox, updated_blocks_device_.data()); } checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); // Repack into output vector. 
updated_blocks_host_ = updated_blocks_device_; block_pointers_host_.resize(0); new_clear_list->clear(); for (size_t i = 0; i < neighbor_indices.size(); i++) { if (updated_blocks_host_[i]) { new_clear_list->push_back(neighbor_indices[i]); block_pointers_host_.push_back(neighbor_pointers_host_[i]); } } // Step three: clear any remaining voxels on the interior of the blocks int num_updated_blocks = new_clear_list->size(); if (num_updated_blocks == 0) { return; } block_pointers_device_ = block_pointers_host_; clearInternalVoxelsKernel<<<num_updated_blocks, dim_threads_per_voxel, 0, cuda_stream_>>>(block_pointers_device_.size(), block_pointers_device_.data(), max_squared_distance_vox); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); } void EsdfIntegrator::computeEsdfOnGPU( const std::vector<Index3D>& blocks_with_sites, EsdfLayer* esdf_layer) { CHECK_NOTNULL(esdf_layer); // Cache everything. constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; const float voxel_size = esdf_layer->block_size() / kVoxelsPerSide; const float max_distance_vox = max_distance_m_ / voxel_size; const float max_squared_distance_vox = max_distance_vox * max_distance_vox; block_pointers_host_.resize(blocks_with_sites.size()); for (size_t i = 0; i < blocks_with_sites.size(); i++) { block_pointers_host_[i] = esdf_layer->getBlockAtIndex(blocks_with_sites[i]).get(); } // First we go over all of the blocks with sites. // We compute all the proximal sites inside the block first. block_pointers_device_ = block_pointers_host_; sweepBlockBandOnGPU(block_pointers_device_, max_squared_distance_vox); // Get the neighbors of all the blocks with sites. 
std::vector<Index3D> blocks_to_run = blocks_with_sites; std::vector<Index3D> updated_blocks; int i = 0; while (!blocks_to_run.empty()) { updateLocalNeighborBandsOnGPU(blocks_to_run, block_pointers_device_, max_squared_distance_vox, esdf_layer, &updated_blocks, &neighbor_pointers_device_); VLOG(3) << "Iteration: " << i << " Number of updated blocks: " << updated_blocks.size() << " blocks with sites: " << blocks_with_sites.size(); i++; sweepBlockBandOnGPU(neighbor_pointers_device_, max_squared_distance_vox); blocks_to_run = std::move(updated_blocks); block_pointers_device_ = neighbor_pointers_device_; } } void EsdfIntegrator::sweepBlockBandOnGPU( device_vector<EsdfBlock*>& block_pointers, float max_squared_distance_vox) { if (block_pointers.empty()) { return; } timing::Timer sweep_timer("esdf/integrate/compute/sweep"); // Caching. constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; const int num_blocks = block_pointers.size(); // Call the kernel. // We do 2-dimensional sweeps in this kernel. Each thread does 3 sweeps. // We do 8 blocks at a time. 
constexpr int kNumBlocksPerCudaBlock = 8; int dim_block = std::max( static_cast<int>( std::ceil(num_blocks / static_cast<float>(kNumBlocksPerCudaBlock))), 1); dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); sweepBlockBandKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( num_blocks, block_pointers.data(), max_squared_distance_vox); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); } void EsdfIntegrator::updateLocalNeighborBandsOnGPU( const std::vector<Index3D>& block_indices, device_vector<EsdfBlock*>& block_pointers, float max_squared_distance_vox, EsdfLayer* esdf_layer, std::vector<Index3D>* updated_blocks, device_vector<EsdfBlock*>* updated_block_pointers) { if (block_indices.empty()) { return; } timing::Timer neighbors_timer("esdf/integrate/compute/neighbors"); CHECK_EQ(block_indices.size(), block_pointers.size()); constexpr int kNumNeighbors = 6; constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; const int num_blocks = block_pointers.size(); timing::Timer table_timer("esdf/integrate/compute/neighbors/table"); // This one is quite a bit more complicated. // For each block, we need to get its 6 neighbors. std::vector<Index3D> neighbor_indices; neighbor_table_host_.resize(num_blocks * kNumNeighbors); neighbor_table_host_.setZero(); neighbor_pointers_host_.resize(0); createNeighborTable(block_indices, esdf_layer, &neighbor_indices, &neighbor_pointers_host_, &neighbor_table_host_); table_timer.Stop(); // Set up an updated map. updated_blocks_device_.resize(neighbor_indices.size()); updated_blocks_device_.setZero(); neighbor_pointers_device_ = neighbor_pointers_host_; neighbor_table_device_ = neighbor_table_host_; timing::Timer kernel_timer("esdf/integrate/compute/neighbors/kernel"); // Ok now we have to give all this stuff to the kernel. // TODO(helen): you get weird-ass concurrency issues if this is not 1. 
constexpr int kNumBlocksPerCudaBlock = 8; int dim_block = std::max( static_cast<int>( std::ceil(num_blocks / static_cast<float>(kNumBlocksPerCudaBlock))), 1); dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); // We have to do the neighbors one at a time basically for concurrency // issues. for (int i = 0; i < kNumNeighbors; i++) { updateLocalNeighborBandsKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( num_blocks, i, block_pointers.data(), neighbor_table_device_.data(), neighbor_pointers_device_.data(), max_squared_distance_vox, updated_blocks_device_.data()); } checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); checkCudaErrors(cudaPeekAtLastError()); kernel_timer.Stop(); // Unpack the kernel results. // TODO(helen): swap this to a kernel operation. updated_blocks->clear(); updated_blocks_host_ = updated_blocks_device_; block_pointers_host_.resize(0); for (size_t i = 0; i < neighbor_indices.size(); i++) { if (updated_blocks_host_[i]) { updated_blocks->push_back(neighbor_indices[i]); block_pointers_host_.push_back(neighbor_pointers_host_[i]); } } *updated_block_pointers = block_pointers_host_; } void EsdfIntegrator::createNeighborTable( const std::vector<Index3D>& block_indices, EsdfLayer* esdf_layer, std::vector<Index3D>* neighbor_indices, host_vector<EsdfBlock*>* neighbor_pointers, host_vector<int>* neighbor_table) { // TODO(helen): make this extensible to different number of neighbors. constexpr int kNumNeighbors = 6; int num_blocks = block_indices.size(); // Hash map mapping the neighbor index to the pointers above. Index3DHashMapType<int>::type neighbor_map; // Direction Shorthand: axis = neighbor_index/2 // direction = neighbor_index%2 ? 
-1 : 1 Index3D direction = Index3D::Zero(); for (int block_number = 0; block_number < num_blocks; block_number++) { const Index3D& block_index = block_indices[block_number]; for (int neighbor_number = 0; neighbor_number < kNumNeighbors; neighbor_number++) { direction.setZero(); // Change just one axis of the direction. direction(neighbor_number / 2) = neighbor_number % 2 ? -1 : 1; // Check if this is already in our hash. Index3D neighbor_index = block_index + direction; auto res = neighbor_map.find(neighbor_index); if (res != neighbor_map.end()) { (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = res->second; } else { // Doesn't exist in the neighbor list yet. EsdfBlock::Ptr esdf_block = esdf_layer->getBlockAtIndex(neighbor_index); if (esdf_block) { int next_index = neighbor_indices->size(); neighbor_indices->push_back(neighbor_index); neighbor_pointers->push_back(esdf_block.get()); neighbor_map[neighbor_index] = next_index; (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = next_index; } else { (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = -1; neighbor_map[neighbor_index] = -1; } } } } CHECK_EQ(neighbor_table->size(), kNumNeighbors * block_indices.size()); CHECK_EQ(neighbor_indices->size(), neighbor_pointers->size()); } } // namespace nvblox
the_stack
#include <common/types.h>
#include <iterator>
#include <algorithm>

namespace sqaod_cuda {

namespace sq = sqaod;

// Output "pointer" usable as a CUB/Thrust output iterator: dereferencing
// yields an Op proxy whose operator= applies the wrapped output operation
// (plain store, scaled store, or accumulate) at the addressed element.
template<template<class> class OpType, class real>
struct OpOutPtr {
    typedef real value_type;
    typedef OpType<real> Op;

    explicit OpOutPtr(real *_d_data, sq::SizeType _stride, Op _op) : d_data(_d_data), stride(_stride), op(_op) { }
    // Host-side rebase: advance the base pointer by yOffset rows.
    void addYOffset(sq::IdxType yOffset) {
        d_data = &d_data[yOffset * stride];
    }
    __device__ __forceinline__ Op operator*() const {
        return op(*d_data);
    }
    __device__ __forceinline__ Op operator[](sq::SizeType idx) const {
        return op(d_data[idx]);
    }
    // 2D access with row stride.
    __device__ __forceinline__ Op operator()(sq::SizeType x, sq::SizeType y) const {
        return op(d_data[x + y * stride]);
    }

    real *d_data;
    sq::SizeType stride;
    Op op;
};

// Output op: plain assignment (*d_value = v).
template<class real>
struct NullOutOp {
    explicit NullOutOp() { }
    explicit __device__ __forceinline__ NullOutOp(real &_d_value) : d_value(&_d_value) { }
    __device__ __forceinline__ NullOutOp operator()(real &_d_value) const {
        return NullOutOp(_d_value);
    }
    __device__ __forceinline__ real operator=(const real &v) const {
        return *d_value = v;
    }
    real *d_value;
};

// Output op: blended accumulate, out = addAssignFactor * out + alpha * v.
template<class real>
struct AddAssignOutOp {
    explicit AddAssignOutOp(real _addAssignFactor, real _alpha) : addAssignFactor(_addAssignFactor), alpha(_alpha) { }
    explicit __device__ AddAssignOutOp(real &_d_data, const AddAssignOutOp &op) : d_data(&_d_data), addAssignFactor(op.addAssignFactor), alpha(op.alpha) { }
    __device__ __forceinline__ AddAssignOutOp operator()(real &_d_value) const {
        return AddAssignOutOp<real>(_d_value, *this);
    }
    __device__ __forceinline__ real operator=(const real &v) const {
        return *d_data = addAssignFactor * *d_data + alpha * v;
    }
    real *d_data;
    real addAssignFactor;
    real alpha;
};

// Output op: scaled store, out = alpha * v.
template<class real>
struct MulOutOp {
    explicit MulOutOp(real _alpha) : alpha(_alpha) { }
    explicit __device__ MulOutOp(real &_d_data, const MulOutOp &op) : d_data(&_d_data), alpha(op.alpha) { }
    __device__ __forceinline__ MulOutOp operator()(real &_d_data) const {
        return MulOutOp(_d_data, *this);
    }
    __device__ __forceinline__ real operator=(const real &v) const {
        return *d_data = alpha * v;
    }
    real *d_data;
    real alpha;
};

// Factory helpers for the three output-pointer flavors above.
template<class real> OpOutPtr<NullOutOp, real> NullOutPtr(real *d_data, sq::SizeType stride = 0) {
    return OpOutPtr<NullOutOp, real>(d_data, stride, NullOutOp<real>());
}

template<class real> OpOutPtr<AddAssignOutOp, real> AddAssignOutPtr(real *d_data, real _mulFactor, real _alpha, sq::SizeType stride = 0) {
    return OpOutPtr<AddAssignOutOp, real>(d_data, stride, AddAssignOutOp<real>(_mulFactor, _alpha));
}

template<class real> OpOutPtr<MulOutOp, real> MulOutPtr(real *d_data, real _mulFactor, sq::SizeType stride = 0) {
    return OpOutPtr<MulOutOp, real>(d_data, stride, MulOutOp<real>(_mulFactor));
}

/* Input iterators */

// Read-only pointer over a (possibly strided) 2D device array.
template<class real>
struct InPtr {
    typedef real value_type;
    explicit InPtr(const real *_d_data, sq::SizeType _stride = 0) : d_data(_d_data), stride(_stride) { }
    void addYOffset(sq::IdxType yOffset) {
        d_data = &d_data[yOffset * stride];
    }
    __device__ __forceinline__ real operator*() const {
        return *d_data;
    }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        return d_data[idx];
    }
    __device__ __forceinline__ real operator()(sq::SizeType x, sq::SizeType y) const {
        return d_data[x + y * stride];
    }
    const real *d_data;
    sq::SizeType stride;
};

// Broadcasts a single device scalar to every accessed position.
template<class real>
struct InScalarPtr {
    typedef real value_type;
    explicit InScalarPtr(const real *_d_data) : d_data(_d_data) { }
    void addYOffset(sq::IdxType yOffset) { }
    __device__ __forceinline__ real operator*() const {
        return *d_data;
    }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        return *d_data;
    }
    __device__ __forceinline__ real operator()(sq::SizeType x, sq::SizeType y) const {
        return *d_data;
    }
    const real *d_data;
};

// Broadcasts a host-side constant (stored by value) to every position.
template<class real>
struct InConstPtr {
    typedef real value_type;
    explicit InConstPtr(const real &_v) : v(_v) { }
    void addYOffset(sq::IdxType yOffset) { }
    __device__ __forceinline__ real operator*() const {
        return v;
    }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        return v;
    }
    __device__ __forceinline__ real operator()(sq::SizeType x, sq::SizeType y) const {
        return v;
    }
    real v;
};

// Broadcasts a row vector: element depends only on the x coordinate.
template<class real>
struct InRowBroadcastPtr {
    typedef real value_type;
    explicit InRowBroadcastPtr(const real *_d_data) : d_data(_d_data) { }
    void addYOffset(sq::IdxType yOffset) { }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        return d_data[idx];
    }
    __device__ __forceinline__ real operator()(sq::SizeType x, sq::SizeType y) const {
        return d_data[x];
    }
    const real *d_data;
};

// Broadcasts a column vector: element depends only on the y coordinate.
template<class real>
struct InColumnBroadcastPtr {
    typedef real value_type;
    explicit InColumnBroadcastPtr(const real *_d_data) : d_data(_d_data) { }
    void addYOffset(sq::IdxType yOffset) {
        d_data += yOffset;
    }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        return d_data[idx];
    }
    __device__ __forceinline__ real operator()(sq::SizeType x, sq::SizeType y) const {
        return d_data[y];
    }
    const real *d_data;
};

// Walks a (shifted) diagonal of a strided matrix; element k maps to
// (k + xOffset, k + yOffset).
template<class real>
struct InDiagonalPtr {
    typedef real value_type;
    typedef InDiagonalPtr<real> SelfType;
    explicit __host__ __device__ __forceinline__
    InDiagonalPtr(const real *_d_data, sq::SizeType _stride, sq::IdxType _xOffset, sq::IdxType _yOffset) : d_data(_d_data), stride(_stride), xOffset(_xOffset), yOffset(_yOffset) { }
    __device__ __forceinline__ real operator[](sq::SizeType idx) const {
        int x = idx + xOffset;
        int y = idx + yOffset;
        return d_data[x + y * stride];
    }
    __device__ __forceinline__ SelfType operator+(sq::IdxType v) const {
        return SelfType(d_data, stride, xOffset + v, yOffset + v);
    }
    const real *d_data;
    int stride, xOffset, yOffset;
};

/* iterators for specific use cases*/

// Strided 1D access: element idx lives at offset + idx * stride.
template<class real>
struct InPtrWithInterval {
    typedef real value_type;
    typedef InPtrWithInterval SelfType;
    __host__ __device__ InPtrWithInterval(const real *_d_data, sq::SizeType _stride, sq::IdxType _offset) : d_data(_d_data), stride(_stride), offset(_offset) { }
    __device__ __forceinline__ const real &operator[](sq::SizeType idx) const {
        return d_data[offset + idx * stride];
    }
    const real *d_data;
    sq::SizeType stride;
    sq::IdxType offset;
};

// Presents a pitched (stride x height) matrix as one linear sequence of
// 'width'-long rows; used to iterate 2D data with 1D algorithms.
template<class real>
struct InLinear2dPtr {
    typedef real value_type;
    typedef InLinear2dPtr SelfType;
    __host__ __device__ InLinear2dPtr(const real *_d_data, sq::SizeType _stride, sq::IdxType _width, sq::IdxType _offset = 0) : d_data(_d_data), stride(_stride), width(_width), offset(_offset) { }
    __device__ __forceinline__ const real &operator[](sq::SizeType idx) const {
        int x = (idx + offset) % width;
        int y = (idx + offset) / width;
        return d_data[x + y * stride];
    }
    __device__ __forceinline__ SelfType operator+(sq::IdxType v) const {
        return SelfType(d_data, stride, width, offset + v);
    }
    const real *d_data;
    sq::SizeType stride;
    sq::SizeType width;
    sq::SizeType offset;
};

/* Functors for offsets */

// Affine index generator: maps idx to a * idx (e.g. row offsets).
struct Linear {
    Linear(sq::IdxType _a) : a(_a) { }
    __device__ sq::IdxType operator[](sq::IdxType idx) const { return a * idx; }
    sq::SizeType a;
};

/* base traits class for CUB iteratos */
template<class V>
struct base_iterator_traits {
    using difference_type = ptrdiff_t;
    typedef V value_type;
    using pointer = V*;
    using reference = V&;
    using iterator_category = std::random_access_iterator_tag;
};

}

// std::iterator_traits specializations so the custom pointers above can be
// passed to CUB/Thrust algorithms that introspect iterator types.
namespace std {

template<template<class> class OpType, class real>
struct iterator_traits<sqaod_cuda::OpOutPtr<OpType, real> > : sqaod_cuda::base_iterator_traits<real> { };

template<class real>
struct iterator_traits<sqaod_cuda::InDiagonalPtr<real>> : sqaod_cuda::base_iterator_traits<real> { };

template<class real>
struct iterator_traits<sqaod_cuda::InLinear2dPtr<real>> : sqaod_cuda::base_iterator_traits<real> { };

template<>
struct iterator_traits<sqaod_cuda::Linear> : sqaod_cuda::base_iterator_traits<int> { };

}

#include "cudafuncs.h"

namespace sqaod_cuda {

namespace sq = sqaod;

// Kernel trampoline: forwards (blockDim, blockIdx + y-offset, threadIdx) to
// the user functor. The y-offset lets the host helper below tile grids larger
// than the 65535 blocks allowed per grid dimension.
template<class Op>
__global__ static void transformBlock2dKernel(Op op, sq::IdxType blockDimYOffset) {
    dim3 blockIdxWithOffset(blockIdx);
    blockIdxWithOffset.y += blockDimYOffset;
    op(blockDim, blockIdxWithOffset, threadIdx);
}

// Launches op over an nBlocksX x nBlocksY grid of blocks, splitting the y
// dimension into <= 65535-block slices per launch.
// NOTE(review): parameter name blockDimYOffset vs. caller's blockIdxYOffset —
// it is a block *index* offset; the naming is inconsistent only.
template<class Op>
void transformBlock2d(const Op &op, sq::SizeType nBlocksX, sq::SizeType nBlocksY, const dim3 &blockDim, cudaStream_t stream) {
    sq::SizeType blockIdxYStep = 65535 / blockDim.y;
    for (sq::IdxType blockIdxYOffset = 0; blockIdxYOffset < nBlocksY; blockIdxYOffset += blockIdxYStep) {
        int blockDimYSpan = std::min(nBlocksY - blockIdxYOffset, blockIdxYStep);
        dim3 gridDim(nBlocksX, blockDimYSpan);
        transformBlock2dKernel<<<gridDim, blockDim, 0, stream>>>(op, blockIdxYOffset);
        DEBUG_SYNC;
    }
}

// Per-element 2D kernel: computes global (x, y), applies the row offset for
// tiled launches, and bounds-checks against (width, height).
template<class Op> __global__ static
void transform2dKernel(Op op, sq::SizeType width, sq::SizeType height, sq::IdxType offset) {
    int gidx = blockDim.x * blockIdx.x + threadIdx.x;
    int gidy = blockDim.y * blockIdx.y + threadIdx.y + offset;
    if ((gidx < width) && (gidy < height))
        op(gidx, gidy);
}

// Applies op(x, y) for every element of a width x height domain, tiling the
// y range so each launch stays within the 65535-block grid-dim limit.
template<class Op>
void transform2d(const Op &op, sq::SizeType width, sq::SizeType height, const dim3 &blockDim, cudaStream_t stream) {
    sq::SizeType yStep = (65535 / blockDim.y) * blockDim.y;
    for (sq::IdxType idx = 0; idx < height; idx += yStep) {
        int hSpan = std::min(height - idx, yStep);
        dim3 gridDim(divru(width, blockDim.x), divru(hSpan, blockDim.y));
        transform2dKernel<<<gridDim, blockDim, 0, stream>>>(op, width, height, idx);
        DEBUG_SYNC;
    }
}

// Element-wise copy/convert from an input pointer object to an output
// pointer object, using the generic transform2d above.
template<class OutType, class InType>
void transform2d(OutType d_out, InType d_in, sq::SizeType width, sq::SizeType height, const dim3 &blockDim, cudaStream_t stream) {
    auto op = [=]__device__(int gidx, int gidy) {
        d_out(gidx, gidy) = (typename OutType::value_type)d_in(gidx, gidy);
    };
    transform2d(op, width, height, blockDim, stream);
}

}
the_stack
#include <cuda.h> #include <cusparse.h> #include "cusparse_error.cuh" namespace BCL { namespace cuda { template <typename T> struct cusparse_type_t; template <> struct cusparse_type_t<int32_t> { using type = int32_t; static auto cusparse_type() { return CUSPARSE_INDEX_32I; } }; template <> struct cusparse_type_t<int64_t> { using type = int64_t; static auto cusparse_type() { return CUSPARSE_INDEX_64I; } }; cusparseHandle_t bcl_cusparse_handle_; // Some functions for debugging template <typename T, typename index_type, typename Allocator> bool is_shared_seg(CudaCSRMatrix<T, index_type, Allocator>& mat) { if (!__is_valid_cuda_gptr(mat.values_data())) { return false; } else if (!__is_valid_cuda_gptr(mat.rowptr_data())) { return false; } else if (!__is_valid_cuda_gptr(mat.colind_data())) { return false; } else { return true; } } template <typename MatrixType> void check_matrix(MatrixType& x) { size_t m = x.m(); size_t n = x.n(); size_t nnz = x.nnz(); using T = typename MatrixType::value_type; using index_type = typename MatrixType::index_type; std::vector<T> values(nnz); std::vector<index_type> rowptr(m+1); std::vector<index_type> colind(nnz); cudaMemcpy(values.data(), x.values_data(), sizeof(T)*nnz, cudaMemcpyDeviceToHost); cudaMemcpy(rowptr.data(), x.rowptr_data(), sizeof(index_type)*(m+1), cudaMemcpyDeviceToHost); cudaMemcpy(colind.data(), x.colind_data(), sizeof(index_type)*nnz, cudaMemcpyDeviceToHost); size_t counted_nnz = 0; for (size_t i = 0; i < m; i++) { index_type last_colidx = -1; for (index_type j_ptr = rowptr[i]; j_ptr < rowptr[i+1]; j_ptr++) { index_type j = colind[j_ptr]; T value = values[j_ptr]; assert(j > last_colidx); last_colidx = j; assert(i >= 0 && i < m); assert(j >= 0 && j < n); counted_nnz++; } } fprintf(stderr, "Counted %lu / %lu nnz, all within bounds (%lu, %lu)\n", counted_nnz, nnz, m, n); // assert(counted_nnz == nnz); } // Summation in CuSPARSE template <typename T, typename index_type, typename Allocator> CudaCSRMatrix<T, index_type, 
Allocator>
sum_cusparse(CudaCSRMatrix<T, index_type, Allocator>& a,
             CudaCSRMatrix<T, index_type, Allocator>& b) {
  // Element-wise sum C = A + B via cusparse csrgeam2.
  // 'A' here is local_c, and 'B' here is result_c.
  // At the end, the new accumulated matrix will be put in local_c.
  cusparseHandle_t& handle = bcl_cusparse_handle_;

  index_type arows = a.shape()[0];
  index_type acols = a.shape()[1];
  index_type brows = b.shape()[0];
  index_type bcols = b.shape()[1];
  assert(acols == bcols);
  assert(arows == brows);
  index_type m = arows;
  index_type n = acols;

  // The X-prefixed nnz routine below expects 32-bit int indices, and the
  // S-prefixed geam2 entry points are single-precision only.
  static_assert(std::is_same<int, index_type>::value);
  static_assert(std::is_same<float, T>::value);

  cusparseMatDescr_t descr_a, descr_b, descr_c;
  cusparseStatus_t status = cusparseCreateMatDescr(&descr_a);
  BCL::cuda::throw_cusparse(status);
  status = cusparseCreateMatDescr(&descr_b);
  BCL::cuda::throw_cusparse(status);
  status = cusparseCreateMatDescr(&descr_c);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatType(descr_a, CUSPARSE_MATRIX_TYPE_GENERAL);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatIndexBase(descr_a, CUSPARSE_INDEX_BASE_ZERO);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatType(descr_b, CUSPARSE_MATRIX_TYPE_GENERAL);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatIndexBase(descr_b, CUSPARSE_INDEX_BASE_ZERO);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatType(descr_c, CUSPARSE_MATRIX_TYPE_GENERAL);
  BCL::cuda::throw_cusparse(status);
  status = cusparseSetMatIndexBase(descr_c, CUSPARSE_INDEX_BASE_ZERO);
  BCL::cuda::throw_cusparse(status);

  index_type a_nnz = a.nnz();
  index_type b_nnz = b.nnz();
  index_type c_nnz;
  // nnzTotalDevHostPtr points at host memory (c_nnz); cuSPARSE writes the
  // result nnz there during the nnz pass.
  index_type* nnzTotalDevHostPtr = &c_nnz;

  index_type* row_ptr_c = rebind_allocator_t<Allocator, index_type>{}.allocate(m+1);
  if (row_ptr_c == nullptr) {
    throw std::runtime_error("Couldn't allocate C.");
  }

  index_type* a_row_ptr = a.rowptr_data();
  index_type* a_col_ind = a.colind_data();
  index_type* b_row_ptr = b.rowptr_data();
  index_type* b_col_ind = b.colind_data();

  T alpha = 1.0;
  T beta = 1.0;

  size_t pBufferSizeInBytes;
  // C's values/colind arrays are not known yet (nnz undetermined); the
  // bufferSize query tolerates null for them.
  status = cusparseScsrgeam2_bufferSizeExt(handle, m, n,
                                           &alpha, descr_a, a_nnz,
                                           a.values_data(), a.rowptr_data(), a.colind_data(),
                                           &beta, descr_b, b_nnz,
                                           b.values_data(), b.rowptr_data(), b.colind_data(),
                                           descr_c, nullptr, row_ptr_c, nullptr,
                                           &pBufferSizeInBytes);
  BCL::cuda::throw_cusparse(status);

  char* buffer = rebind_allocator_t<Allocator, char>{}.allocate(pBufferSizeInBytes);
  // Fix: this allocation was previously unchecked, unlike every other
  // allocation in this function.
  if (buffer == nullptr) {
    throw std::runtime_error("sum_cusparse(): couldn't allocate work buffer.");
  }

  status = cusparseXcsrgeam2Nnz(handle, m, n,
                                descr_a, a_nnz, a_row_ptr, a_col_ind,
                                descr_b, b_nnz, b_row_ptr, b_col_ind,
                                descr_c, row_ptr_c, nnzTotalDevHostPtr, buffer);
  BCL::cuda::throw_cusparse(status);

  if (nnzTotalDevHostPtr == nullptr) {
    throw std::runtime_error("Unhandled case: nnzTotalDevHostPtr is null.");
  } else {
    c_nnz = *nnzTotalDevHostPtr;
  }

  index_type* col_ind_c = rebind_allocator_t<Allocator, index_type>{}.allocate(c_nnz);
  T* values_c = rebind_allocator_t<Allocator, T>{}.allocate(c_nnz);
  if (col_ind_c == nullptr || values_c == nullptr) {
    // Fix: message previously named sum_tiles(), but this throw is in
    // sum_cusparse().
    throw std::runtime_error("sum_cusparse(): out of memory.");
  }

  status = cusparseScsrgeam2(handle, m, n,
                             &alpha, descr_a, a_nnz,
                             a.values_data(), a.rowptr_data(), a.colind_data(),
                             &beta, descr_b, b_nnz,
                             b.values_data(), b.rowptr_data(), b.colind_data(),
                             descr_c, values_c, row_ptr_c, col_ind_c, buffer);
  BCL::cuda::throw_cusparse(status);
  cudaDeviceSynchronize();

  cusparseDestroyMatDescr(descr_a);
  cusparseDestroyMatDescr(descr_b);
  cusparseDestroyMatDescr(descr_c);
  deallocate_with<char, Allocator>(buffer);

  return CudaCSRMatrix<T, index_type, Allocator>({m, n}, c_nnz, values_c, row_ptr_c, col_ind_c);
}

/* Folds a vector of CSR tiles into a single CSR matrix by repeated
 * pairwise sum_cusparse() calls.  Consumes (moves from) the input tiles.
 * Returns an empty 0x0 matrix for an empty input. */
template <typename T, typename index_type, typename Allocator>
CudaCSRMatrix<T, index_type, Allocator>
sum_tiles_cusparse(std::vector<CudaCSRMatrix<T, index_type, Allocator>>& imp) {
  using csr_type = CudaCSRMatrix<T, index_type, Allocator>;
  if (imp.size() == 0) {
    return csr_type({0, 0}, 0);
  }
  csr_type sum = std::move(imp[0]);
  for (size_t i = 1; i < imp.size(); i++) {
    csr_type comp = std::move(imp[i]);
    csr_type result = sum_cusparse<T, index_type, Allocator>(sum, comp);
    std::swap(sum, result);
  }
  return sum;
}

// SpGEMM in CuSPARSE
template <typename AMatrixType, typename BMatrixType>
auto spgemm_cusparse(AMatrixType& a, BMatrixType& b) {
  using T = typename AMatrixType::value_type;
  using index_type = typename AMatrixType::index_type;
  using Allocator = BCL::cuda::bcl_allocator<T>;
  // using Allocator = typename AMatrixType::allocator_type;
  // static assert index_type is graphblas::Index
  assert(a.n() == b.m());
  if (a.nnz() == 0 || b.nnz() == 0) {
    // return empty matrix
    return CudaCSRMatrix<T, index_type, Allocator>({a.shape()[0], b.shape()[1]}, 0);
  } else {
    size_t m = a.m();
    size_t n = b.n();
    size_t k = a.n();
    assert(b.m() == k);
    // check_matrix(a);
    // check_matrix(b);
    /* disabled debug dump:
       fprintf(stderr, "(%lu) Multiplying A (%lu x %lu), %lu nnz by B (%lu x %lu), %lu nnz -> C(%lu x %lu), ? 
(end of disabled size dump) */
    // check_matrix(a);
    // check_matrix(b);
    // fprintf(stderr, "Matrices okay.\n");
    cusparseHandle_t& handle = bcl_cusparse_handle_;

    int baseC, nnzC;
    csrgemm2Info_t info = nullptr;
    size_t bufferSize;
    char* buffer = nullptr;
    // nnzTotalDevHostPtr points to host memory
    int* nnzTotalDevHostPtr = &nnzC;
    T alpha = 1;
    T beta = 0;

    cusparseMatDescr_t descr;
    cusparseStatus_t status = cusparseCreateMatDescr(&descr);
    BCL::cuda::throw_cusparse(status);
    status = cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
    BCL::cuda::throw_cusparse(status);
    status = cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
    BCL::cuda::throw_cusparse(status);
    status = cusparseCreateCsrgemm2Info(&info);
    BCL::cuda::throw_cusparse(status);

    // Zero-filled dummy "D" matrix operands: beta == 0 so D does not
    // contribute, but the API still requires valid pointers.
    T* values_d = allocate_with<T, Allocator>(1);
    index_type* colind_d = allocate_with<index_type, Allocator>(1);
    index_type* rowptr_d = allocate_with<index_type, Allocator>(m+1);
    cudaMemset(rowptr_d, 0, sizeof(index_type)*(m+1));
    cudaMemset(values_d, 0, sizeof(T));
    cudaMemset(colind_d, 0, sizeof(index_type));

    /* (disabled debug dump of the bufferSizeExt arguments and an
       allocation-overlap check removed for readability) */
    status =
        cusparseScsrgemm2_bufferSizeExt(handle, m, n, k, &alpha,
                                        descr, a.nnz(), a.rowptr_data(), a.colind_data(),
                                        descr, b.nnz(), b.rowptr_data(), b.colind_data(),
                                        &beta,
                                        descr, 0, rowptr_d, colind_d,
                                        info, &bufferSize);
    cudaDeviceSynchronize();
    BCL::cuda::throw_cusparse(status);

    buffer = allocate_with<char, Allocator>(bufferSize);

    // step 3: compute csrRowPtrC
    /* (disabled debug print of matrix A removed for readability) */
    index_type* csrRowPtrC = allocate_with<index_type, Allocator>(m+1);
    status = cusparseXcsrgemm2Nnz(handle, m, n, k,
                                  descr, a.nnz(), a.rowptr_data(), a.colind_data(),
                                  descr, b.nnz(), b.rowptr_data(), b.colind_data(),
                                  descr, 0, rowptr_d, colind_d,
                                  descr, csrRowPtrC,
                                  nnzTotalDevHostPtr, info, buffer);
    cudaDeviceSynchronize();
    /* (disabled dump of the inputs on CUSPARSE_STATUS_EXECUTION_FAILED
       removed for readability) */
    BCL::cuda::throw_cusparse(status);

    if (nnzTotalDevHostPtr != nullptr) {
      nnzC = *nnzTotalDevHostPtr;
    } else {
      cudaMemcpy(&nnzC, csrRowPtrC+m, sizeof(index_type), cudaMemcpyDeviceToHost);
      cudaMemcpy(&baseC, csrRowPtrC, sizeof(index_type), cudaMemcpyDeviceToHost);
      nnzC -= baseC;
    }

    // step 4: finish sparsity pattern and value of C
    index_type* csrColIndC = allocate_with<index_type, Allocator>(nnzC);
    T* csrValC = allocate_with<T, Allocator>(nnzC);
    // Remark: set csrValC to null if only sparsity pattern is required.
    status = cusparseScsrgemm2(handle, m, n, k, &alpha,
                               descr, a.nnz(), a.values_data(), a.rowptr_data(), a.colind_data(),
                               descr, b.nnz(), b.values_data(), b.rowptr_data(), b.colind_data(),
                               &beta,
                               descr, 0, values_d, rowptr_d, colind_d,
                               descr, csrValC, csrRowPtrC, csrColIndC,
                               info, buffer);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    // step 5: destroy the opaque structure
    cusparseDestroyCsrgemm2Info(info);
    cusparseDestroyMatDescr(descr);
    deallocate_with<char, Allocator>(buffer);
    deallocate_with<index_type, Allocator>(rowptr_d);
    deallocate_with<index_type, Allocator>(colind_d);
    deallocate_with<T, Allocator>(values_d);

    return CudaCSRMatrix<T, index_type, Allocator>({m, n}, nnzC, csrValC, csrRowPtrC, csrColIndC);
  }
}

// New CuSPARSE SpGEMM API, `cusparseSpGEMM`
// Currently no advantage, since it does not
// actually support any extra types.
// TODO: concepts
template <typename AMatrixType, typename BMatrixType>
auto spgemm_cusparse_newapi(AMatrixType& a, BMatrixType& b) {
  // static assert index_type is graphblas::Index
  using T = typename AMatrixType::value_type;
  using index_type = typename AMatrixType::index_type;
  using Allocator = BCL::cuda::bcl_allocator<T>;
  if (a.nnz() == 0 || b.nnz() == 0) {
    // return empty matrix
    return CudaCSRMatrix<T, index_type, Allocator>({a.shape()[0], b.shape()[1]}, 0);
  } else {
    size_t m = a.m();
    size_t n = b.n();
    size_t k = a.n();
    cusparseHandle_t& handle = bcl_cusparse_handle_;
    T alpha = 1;
    T beta = 0;
    fprintf(stderr, "(%lu) Multiplying A (%lu x %lu), %lu nnz by B (%lu x %lu), %lu nnz -> C(%lu x %lu), ? 
nnz\n", BCL::rank(), a.m(), a.n(), a.nnz(), b.m(), b.n(), b.nnz(), m, n);

    cusparseSpMatDescr_t descr_a, descr_b, descr_c;

    fprintf(stderr, "(%lu) Create CSRs...\n", BCL::rank());
    cusparseStatus_t status =
        cusparseCreateCsr(&descr_a, a.m(), a.n(), a.nnz(),
                          a.rowptr_data(), a.colind_data(), a.values_data(),
                          cusparse_type_t<index_type>::cusparse_type(),
                          cusparse_type_t<index_type>::cusparse_type(),
                          CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();
    status = cusparseCreateCsr(&descr_b, b.m(), b.n(), b.nnz(),
                               b.rowptr_data(), b.colind_data(), b.values_data(),
                               cusparse_type_t<index_type>::cusparse_type(),
                               cusparse_type_t<index_type>::cusparse_type(),
                               CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();
    // C starts empty; its buffers are attached once the result size is known.
    status = cusparseCreateCsr(&descr_c, m, n, 0,
                               NULL, NULL, NULL,
                               cusparse_type_t<index_type>::cusparse_type(),
                               cusparse_type_t<index_type>::cusparse_type(),
                               CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    cusparseSpGEMMDescr_t descr_spgemm;
    status = cusparseSpGEMM_createDescr(&descr_spgemm);
    BCL::cuda::throw_cusparse(status);

    // Work estimation: first call queries the buffer size, second call runs
    // the estimation with the allocated buffer.
    size_t bufferSize1;
    fprintf(stderr, "(%lu) Estimate work...\n", BCL::rank());
    status = cusparseSpGEMM_workEstimation(handle,
                                           CUSPARSE_OPERATION_NON_TRANSPOSE,
                                           CUSPARSE_OPERATION_NON_TRANSPOSE,
                                           &alpha, descr_a, descr_b, &beta, descr_c,
                                           CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT,
                                           descr_spgemm, &bufferSize1, NULL);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();
    fprintf(stderr, "(%lu) first buffer %lu bytes\n", BCL::rank(), bufferSize1);
    char* buffer_1 = allocate_with<char, Allocator>(bufferSize1);
    status = cusparseSpGEMM_workEstimation(handle,
                                           CUSPARSE_OPERATION_NON_TRANSPOSE,
                                           CUSPARSE_OPERATION_NON_TRANSPOSE,
                                           &alpha, descr_a, descr_b, &beta, descr_c,
                                           CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT,
                                           descr_spgemm, &bufferSize1, buffer_1);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    // Compute phase, same query-then-run pattern.
    fprintf(stderr, "(%lu) Compute 1...\n", BCL::rank());
    size_t bufferSize2;
    status = cusparseSpGEMM_compute(handle,
                                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                                    &alpha, descr_a, descr_b, &beta, descr_c,
                                    CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT,
                                    descr_spgemm, &bufferSize2, NULL);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();
    fprintf(stderr, "(%lu) second buffer %lu bytes\n", BCL::rank(), bufferSize2);
    char* buffer_2 = allocate_with<char, Allocator>(bufferSize2);
    fprintf(stderr, "(%lu) Compute 2...\n", BCL::rank());
    status = cusparseSpGEMM_compute(handle,
                                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                                    &alpha, descr_a, descr_b, &beta, descr_c,
                                    CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT,
                                    descr_spgemm, &bufferSize2, buffer_2);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    // Query the result size, attach C's freshly allocated arrays, and copy
    // the result out of the SpGEMM descriptor.
    fprintf(stderr, "(%lu) Extracting...\n", BCL::rank());
    int64_t nrows, ncols, nnz;
    status = cusparseSpMatGetSize(descr_c, &nrows, &ncols, &nnz);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    index_type* c_colind = allocate_with<index_type, Allocator>(nnz);
    T* c_values = allocate_with<T, Allocator>(nnz);
    index_type* c_rowptr = allocate_with<index_type, Allocator>(m+1);
    status = cusparseCsrSetPointers(descr_c, c_rowptr, c_colind, c_values);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();
    status = cusparseSpGEMM_copy(handle,
                                 CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 &alpha, descr_a, descr_b, &beta, descr_c,
                                 CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, descr_spgemm);
    BCL::cuda::throw_cusparse(status);
    cudaDeviceSynchronize();

    deallocate_with<char, Allocator>(buffer_2);
    deallocate_with<char, Allocator>(buffer_1);
    cusparseSpGEMM_destroyDescr(descr_spgemm);
    cusparseDestroySpMat(descr_a);
    cusparseDestroySpMat(descr_b);
    cusparseDestroySpMat(descr_c);

    return CudaCSRMatrix<T, index_type, Allocator>({m, n}, nnz, c_values, c_rowptr, c_colind);
  }
}

// SpMM using CuSPARSE
template <typename AMatrixType, typename BMatrixType, typename
CMatrixType>
void spmm_cusparse(AMatrixType& a, BMatrixType& b, CMatrixType& c,
                   typename AMatrixType::value_type alpha = 1,
                   typename AMatrixType::value_type beta = 1) {
  // C = alpha*A*B + beta*C with cusparseSpMM: A is CSR, B and C are dense
  // matrices that must share the same (row- or column-major) layout.
  if (a.nnz() == 0) {
    return;
  }
  using T = typename AMatrixType::value_type;
  using Allocator = BCL::cuda::bcl_allocator<T>;
  // using Allocator = typename AMatrix::allocator_type;
  // CUDA_R_32F is hard-coded below, so only float is supported.
  static_assert(std::is_same<typename AMatrixType::value_type, float>::value);
  static_assert(std::is_same<typename BMatrixType::value_type, float>::value);
  static_assert(std::is_same<typename CMatrixType::value_type, float>::value);
  using index_type = typename AMatrixType::index_type;
  // static_assert(std::is_same<typename AMatrixType::index_type, int32_t>::value);
  cusparseHandle_t& handle = bcl_cusparse_handle_;

  // Pick the dense-matrix order and the SpMM algorithm from B/C's indexing;
  // the static_asserts guarantee exactly one branch is taken.
  cusparseOrder_t order;
  cusparseSpMMAlg_t algorithm;
  using bmatrix_indexing = typename BMatrixType::indexing_type;
  using cmatrix_indexing = typename CMatrixType::indexing_type;
  static_assert(std::is_same<bmatrix_indexing, cmatrix_indexing>::value);
  constexpr bool row_major = std::is_same<bmatrix_indexing, RowMajorIndexing>::value;
  constexpr bool column_major = std::is_same<bmatrix_indexing, ColumnMajorIndexing>::value;
  static_assert(row_major || column_major);
  if (std::is_same<bmatrix_indexing, RowMajorIndexing>::value) {
    order = CUSPARSE_ORDER_ROW;
    algorithm = CUSPARSE_SPMM_CSR_ALG2;
  } else if (std::is_same<bmatrix_indexing, ColumnMajorIndexing>::value) {
    order = CUSPARSE_ORDER_COL;
    algorithm = CUSPARSE_MM_ALG_DEFAULT;
  }

  cusparseSpMatDescr_t a_cusparse;
  cusparseStatus_t status =
      cusparseCreateCsr(&a_cusparse, a.m(), a.n(), a.nnz(),
                        a.rowptr_data(), a.colind_data(), a.values_data(),
                        cusparse_type_t<index_type>::cusparse_type(),
                        cusparse_type_t<index_type>::cusparse_type(),
                        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
  BCL::cuda::throw_cusparse(status);

  cusparseDnMatDescr_t b_cusparse;
  status = cusparseCreateDnMat(&b_cusparse, b.m(), b.n(), b.ld(), b.data(),
                               CUDA_R_32F, order);
  BCL::cuda::throw_cusparse(status);

  cusparseDnMatDescr_t c_cusparse;
  status = cusparseCreateDnMat(&c_cusparse, c.m(), c.n(), c.ld(), c.data(),
                               CUDA_R_32F, order);
  BCL::cuda::throw_cusparse(status);

  size_t bufferSize;
  status = cusparseSpMM_bufferSize(handle,
                                   CUSPARSE_OPERATION_NON_TRANSPOSE,
                                   CUSPARSE_OPERATION_NON_TRANSPOSE,
                                   &alpha, a_cusparse, b_cusparse, &beta, c_cusparse,
                                   CUDA_R_32F, algorithm, &bufferSize);
  BCL::cuda::throw_cusparse(status);

  char* externalBuffer = allocate_with<char, Allocator>(bufferSize);

  status = cusparseSpMM(handle,
                        CUSPARSE_OPERATION_NON_TRANSPOSE,
                        CUSPARSE_OPERATION_NON_TRANSPOSE,
                        &alpha, a_cusparse, b_cusparse, &beta, c_cusparse,
                        CUDA_R_32F, algorithm, externalBuffer);
  BCL::cuda::throw_cusparse(status);

  cudaDeviceSynchronize();
  cudaError_t error = cudaGetLastError();
  if (error != cudaSuccess) {
    // Fix: previously this branch only assert()ed, which compiles away under
    // NDEBUG and silently swallowed the failure.  Report the error before
    // asserting so release builds at least log it.
    fprintf(stderr, "spmm_cusparse: CUDA error: %s\n", cudaGetErrorString(error));
    assert(error == cudaSuccess);
  }

  deallocate_with<char, Allocator>(externalBuffer);
  cusparseDestroySpMat(a_cusparse);
  cusparseDestroyDnMat(b_cusparse);
  cusparseDestroyDnMat(c_cusparse);
}

} // end cuda
} // end BCL
the_stack
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> #if __CUDA_ARCH__ >= 700 template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 32 + 16; const uint stdB = 64 + 16; const uint stdC = 128 + 4; __shared__ float fShare[stdC*16]; ehalf* hShare = (ehalf*)fShare; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*32]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint tx = tid % 8; uint ty = tid / 8; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.y *= 32*32; entry.x *= N*32; LutOffsets[i] = entry; } __syncthreads(); uint storA = ty*stdA + tx*4; uint storB = ty*stdB + tx*8 + stdA*32; uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA, (tid & 64)*(OP_A == OP_N ? 
1 : stdA)*16/64); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 64)*stdB*16/64 + (tid & 32) + stdA*32); uint n = idx_N*64 + tx*8; uint offsetA = tid*4; uint offsetB = ty*N + n; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); fragmentC<OP_A,OP_N,M16N16K16> fragC[2][2]; int idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; const ehalf* pA = A + (entry.y + offsetA); uint2 a00 = load_half4(pA + 0*32); uint2 a16 = load_half4(pA + 16*32); uint4 b00, b16; asm("mov.b64 {%0, %1}, 0; \n\t" "mov.b64 {%2, %3}, 0; \n\t" "mov.b64 {%4, %5}, 0; \n\t" "mov.b64 {%6, %7}, 0; \n\t" "@pn ld.global.nc.v4.u32 {%0, %1, %2, %3}, [%8];\n\t" "@pn ld.global.nc.v4.u32 {%4, %5, %6, %7}, [%9];\n\t" : "=r"(b00.x), "=r"(b00.y), "=r"(b00.z), "=r"(b00.w), "=r"(b16.x), "=r"(b16.y), "=r"(b16.z), "=r"(b16.w) : "l"(B + (entry.x + offsetB + N* 0)), "l"(B + (entry.x + offsetB + N*16))); if (GATED) { asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a00.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a00.y) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a16.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a16.y) : "r"(gate)); } __syncthreads(); *(uint2*)&hShare[storA + 0*stdA] = a00; *(uint2*)&hShare[storA + 16*stdA] = a16; *(uint4*)&hShare[storB + 0*stdB] = b00; *(uint4*)&hShare[storB + 16*stdB] = b16; __syncthreads(); fragmentA<OP_A,M16N16K16> fragA[2]; fragmentB<OP_N,M16N16K16> fragB[2]; for (int i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + (OP_A == OP_N ? 
stdA : 1)*i*16, stdA); fragB[i].load(hShare, loadB + i*16, stdB); } for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } while (++idx_lut < lut_size); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); uint txc = tid % 16; uint tyc = tid / 16; n = idx_N*64 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 96); uint offsetC = (idx_K*32 + tyc)*N + n; if (idx_Lock == 0) { for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); if (n < N) for (int j = 0; j < 2; j++) store_half4(C + (offsetC + N*(j*8 + i*16)), to_half4(ew_add( *(float4*)&fShare[loadC + stdC*j*8 + 0], *(float4*)&fShare[loadC + stdC*j*8 + 64]))); } } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); if (n < N) for (int j = 0; j < 2; j++) store_half4(C + (offsetC + N*(j*8 + i*16)), to_half4(ew_add( *(float4*)&fShare[loadC + stdC*j*8 + 0], *(float4*)&fShare[loadC + stdC*j*8 + 64]))); } __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 32; tyc = tid / 32; n = idx_N*64 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*32 + tyc)*N + n; // subsequent blocks must accumulate for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); if (n < N) for (int j = 0; j < 4; j++) reduce_half2(C + (offsetC + N*(j*4 + i*16)), to_half2(ew_add( *(float2*)&fShare[loadC + stdC*j*4 + 0], 
*(float2*)&fShare[loadC + stdC*j*4 + 64]))); } __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else { uint n = idx_N*64 + tx*8; uint offsetC = (idx_K*32 + ty)*N + n; if (n < N) { zero_half8(C + (offsetC + N *0)); zero_half8(C + (offsetC + N*16)); } } } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 16; const uint stdB = 80; const uint stdC = 68; __shared__ ehalf hShare[(stdA + stdB)*16]; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*16]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint txb = tid % 8; uint tyb = tid / 8; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 64) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.y *= 16*16; entry.x *= N*16; LutOffsets[i] = entry; } __syncthreads(); uint txa = tid % 4; uint tya = tid / 4; uint storA = tya*stdA + txa*4; uint storB = tyb*stdB + txb*8 + 16*stdA; uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, 16*stdA + (tid & 32)); uint n = idx_N*64 + txb*8; uint offsetA = tid*4; uint offsetB = tyb*N + 
n; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); fragmentC<OP_A,OP_N,M16N16K16> fragC[2]; int idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; uint2 a0 = load_half4(A + (entry.y + offsetA)); uint4 b0, b8; asm("mov.b64 {%0, %1}, 0; \n\t" "mov.b64 {%2, %3}, 0; \n\t" "mov.b64 {%4, %5}, 0; \n\t" "mov.b64 {%6, %7}, 0; \n\t" "@pn ld.global.nc.v4.u32 {%0, %1, %2, %3}, [%8];\n\t" "@pn ld.global.nc.v4.u32 {%4, %5, %6, %7}, [%9];\n\t" : "=r"(b0.x), "=r"(b0.y), "=r"(b0.z), "=r"(b0.w), "=r"(b8.x), "=r"(b8.y), "=r"(b8.z), "=r"(b8.w) : "l"(B + (entry.x + offsetB + N*0)), "l"(B + (entry.x + offsetB + N*8))); if (GATED) { asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a0.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a0.y) : "r"(gate)); } __syncthreads(); *(uint2*)&hShare[storA] = a0; *(uint4*)&hShare[storB + 0*stdB] = b0; *(uint4*)&hShare[storB + 8*stdB] = b8; __syncthreads(); fragmentA<OP_A,M16N16K16> fragA; fragmentB<OP_N,M16N16K16> fragB; fragA.load(hShare, loadA, stdA); #pragma unroll for (int j = 0; j < 2; j++) { fragB.load(hShare, loadB + j*16, stdB); fragC[j].mma_sync(fragA, fragB); } } } while (++idx_lut < lut_size); // allow assembler to forget these registers in the main loop asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); // use thread stride of 4 to allow use of shared stride of 68 // which minimizes shared bank conflicts on write. 
uint txc = tid % 16; uint tyc = tid / 16; n = idx_N*64 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 32); uint offsetC = (idx_K*16 + tyc)*N + n; __syncthreads(); for (int j = 0; j < 2; j++) fragC[j].store(hShare, storC + j*16, stdC); __syncthreads(); if (idx_Lock == 0) { // no lock needed just write out the results for (uint i = 0; i < 4; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 4; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 32; tyc = tid / 32; n = idx_N*64 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*16 + tyc)*N + n; // subsequent blocks must accumulate for (uint i = 0; i < 8; i++) if (n < N) reduce_half2(C + (offsetC + N*i*2), *(uint*)&hShare[loadC + stdC*i*2]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else { uint n = idx_N*64 + txb*8; C += (idx_K*16 + tyb)*N + n; if (n < N) { zero_half8(C + N*0); zero_half8(C + N*8); } } } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 8; const uint stdB = 80; const uint stdC = 68; __shared__ ehalf hShare[(stdA + stdB)*16]; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*16]; uint tid = threadIdx.x; uint 
idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 64) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.y *= 8*8; // 64 entries of A per block entry.x *= N*8; // 8 lines of B per block LutOffsets[i] = entry; } if (tid == 0) Gates[lut_size] = 0; // needed if lut_size is odd __syncthreads(); uint t32 = tid & 32; uint t31 = tid & 31; uint txb = tid % 8; uint tyb = t31 / 8; uint storA = tid*2; uint storB = tyb*stdB + txb*8 + t32*20 + 16*stdA; uint loadA = fragmentA<OP_A,M8N32K16>::get_idx(tid, stdA); uint loadB = fragmentB<OP_N,M8N32K16>::get_idx(tid, stdB, t32 + 16*stdA); uint n = idx_N*64 + txb*8; uint offsetA = t31*2; uint offsetB = tyb*N + n; fragmentC<OP_A,OP_N,M8N32K16> fragC; uint idx_lut = t32 / 32; uint idx_lut2 = 0; uint lut_size2 = (lut_size + 1)/2; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); #pragma unroll 1 do { uint a0 = 0; uint4 b0 = {0}; uint4 b4 = {0}; uint gate = Gates[idx_lut]; // if the gate is zero just skip over memory loads // we compute 2 blocks per loop so it's easier to just always do the mma math if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; a0 = load_half2(A + (entry.y + offsetA)); asm("@pn 
ld.global.nc.v4.u32 {%0, %1, %2, %3}, [%8];\n\t" "@pn ld.global.nc.v4.u32 {%4, %5, %6, %7}, [%9];\n\t" : "=r"(b0.x), "=r"(b0.y), "=r"(b0.z), "=r"(b0.w), "=r"(b4.x), "=r"(b4.y), "=r"(b4.z), "=r"(b4.w) : "l"(B + (entry.x + offsetB + N*0)), "l"(B + (entry.x + offsetB + N*4))); if (GATED) asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a0) : "r"(gate)); } // if (OP_A == OP_T) // printf("%d %2d A:%08x B: %08x %08x %08x %08x %08x %08x %08x %08x\n", idx_K, tid, a0, b0.x,b0.y,b0.z,b0.w, b4.x,b4.y,b4.z,b4.w); __syncthreads(); *(uint* )&hShare[storA ] = a0; *(uint4*)&hShare[storB + 0*stdB] = b0; *(uint4*)&hShare[storB + 4*stdB] = b4; __syncthreads(); fragmentA<OP_A,M8N32K16> fragA; fragmentB<OP_N,M8N32K16> fragB; fragA.load(hShare, loadA, stdA); fragB.load(hShare, loadB, stdB); // if (OP_A == OP_T) // printf("%d %2d A:%08x %08x %08x %08x %08x %08x %08x %08x B:%08x %08x %08x %08x %08x %08x %08x %08x\n", idx_K, tid, // fragA.x[0], fragA.x[1], fragA.x[2], fragA.x[3], fragA.x[4], fragA.x[5], fragA.x[6], fragA.x[7], // fragB.x[0], fragB.x[1], fragB.x[2], fragB.x[3], fragB.x[4], fragB.x[5], fragB.x[6], fragB.x[7]); fragC.mma_sync(fragA, fragB); idx_lut += 2; } while (++idx_lut2 < lut_size2); // allow assembler to forget these registers in the main loop asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); // use thread stride of 4 to allow use of shared stride of 68 // which minimizes shared bank conflicts on write. 
uint txc = tid % 16; uint tyc = tid / 16; n = idx_N*64 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M8N32K16>::get_idx(tid, stdC, tid & 32); uint offsetC = (idx_K*8 + tyc)*N + n; // if (OP_A == OP_T) // printf("%d %d %2d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", idx_K, idx_Lock, tid, fragC.x[0], fragC.x[1], fragC.x[2], fragC.x[3], fragC.x[4], fragC.x[5], fragC.x[6], fragC.x[7]); __syncthreads(); fragC.store(hShare, storC, stdC); __syncthreads(); if (idx_Lock == 0) { // no lock needed just write out the results for (uint i = 0; i < 2; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 2; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 32; tyc = tid / 32; n = idx_N*64 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*8 + tyc)*N + n; // subsequent blocks must accumulate for (uint i = 0; i < 4; i++) if (n < N) reduce_half2(C +(offsetC + N*i*2), *(uint*)&hShare[loadC + stdC*i*2]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else // lut_size == 0 { uint txc = tid % 8; uint tyc = tid / 8; uint n = idx_N*64 + txc*8; uint offsetC = (idx_K*8 + tyc)*N + n; if (n < N) zero_half8(C + offsetC); } } template <bool N64, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const 
uint stdAB = 72; const uint stdC = 132; __shared__ ehalf hShare[stdAB*2*32]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 8; uint ty = tid / 8; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*32 + ty)*N + n0; uint offsetB0 = (idx_B*32 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 96)/2); uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 96)/2 + stdAB*32); fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2]; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint4 a00 = {0}, a16 = {0}; uint4 b00 = {0}, b16 = {0}; if (N64 || n < N) { a00 = load_half8(A0 + (offsetA + N*00)); a16 = load_half8(A0 + (offsetA + N*16)); b00 = load_half8(B0 + (offsetB + N*00)); b16 = load_half8(B0 + (offsetB + N*16)); } offsetA += 64; offsetB += 64; if (!N64) n += 64; __syncthreads(); *(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a00; *(uint4*)&hShare[storAB + 16*stdAB + 0*stdAB] = a16; *(uint4*)&hShare[storAB + 0*stdAB + 32*stdAB] = b00; *(uint4*)&hShare[storAB + 16*stdAB + 32*stdAB] = b16; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA[2]; fragmentB<OP_T,M16N16K16> fragB[2]; for (int i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + stdAB*i*16, stdAB); fragB[i].load(hShare, loadB + stdAB*i*16, stdAB); } for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : 
"=r"(bid) :); uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96)); if (accumulate) { tx = tid % 16; ty = tid / 16; uint loadC = ty*stdC + tx*2; uint offsetC = bid*32*32 + tid*2; for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); for (uint j = 0; j < 2; j++) { float2 sum2 = ew_add( ew_add( *(float2*)&fShare[loadC + j*8*stdC + 0], *(float2*)&fShare[loadC + j*8*stdC + 32]), ew_add( *(float2*)&fShare[loadC + j*8*stdC + 64], *(float2*)&fShare[loadC + j*8*stdC + 96])); reduce_half2(C + offsetC + i*4*128 + j*2*128, to_half2(sum2)); } } } else { tx = tid % 8; ty = tid / 8; uint loadC = ty*stdC + tx*4; uint offsetC = bid*32*32 + tid*4; for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); float4 sum4 = ew_add( ew_add( *(float4*)&fShare[loadC + 0], *(float4*)&fShare[loadC + 32]), ew_add( *(float4*)&fShare[loadC + 64], *(float4*)&fShare[loadC + 96])); store_half4(C + offsetC + i*4*128, to_half4(sum4)); } } } else if (!accumulate) // gate == 0 zero_half8(C + (bid*32*32 + tid*8)); } template <bool N64, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const uint stdAB = 72; const uint stdC = 48; __shared__ ehalf hShare[stdAB*2*16]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? 
Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 8; uint ty = tid / 8; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*16 + ty)*N + n0; uint offsetB0 = (idx_B*16 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 32)); uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 32) + 16*stdAB); fragmentC<OP_N,OP_T,M16N16K16> fragC; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint4 a0 = {0}, a8 = {0}; uint4 b0 = {0}, b8 = {0}; if (N64 || n < N) { a0 = load_half8(A0 + (offsetA + N*0)); a8 = load_half8(A0 + (offsetA + N*8)); b0 = load_half8(B0 + (offsetB + N*0)); b8 = load_half8(B0 + (offsetB + N*8)); } offsetA += 64; offsetB += 64; if (!N64) n += 64; __syncthreads(); *(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0; *(uint4*)&hShare[storAB + 8*stdAB + 0*stdAB] = a8; *(uint4*)&hShare[storAB + 0*stdAB + 16*stdAB] = b0; *(uint4*)&hShare[storAB + 8*stdAB + 16*stdAB] = b8; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA; fragmentB<OP_T,M16N16K16> fragB; #pragma unroll for (uint j = 0; j < 2; j++) { fragA.load(hShare, loadA + j*16, stdAB); fragB.load(hShare, loadB + j*16, stdAB); fragC.mma_sync(fragA, fragB); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 32)/2); __syncthreads(); fragC.store(fShare, storC, stdC); __syncthreads(); if (accumulate) { tx = tid % 8; ty = tid / 8; uint loadC = ty*stdC + tx*2; uint offsetC = bid*16*16 + tid*2; for (uint i = 
0; i < 2; i++) reduce_half2(C + offsetC + i*2*64, to_half2(ew_add( *(float2*)&fShare[loadC + i*8*stdC + 0], *(float2*)&fShare[loadC + i*8*stdC + 16]))); } else { tx = tid % 4; ty = tid / 4; uint loadC = ty*stdC + tx*4; uint offsetC = bid*16*16 + tid*4; store_half4(C + offsetC, to_half4(ew_add( *(float4*)&fShare[loadC + 0], *(float4*)&fShare[loadC + 16]))); } } else if (!accumulate) // gate == 0 zero_half4(C + (bid*16*16 + tid*4)); } template <bool N64, bool GATED> __global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const uint stdAB = 72; const uint stdC = 8; __shared__ ehalf hShare[stdAB*8*2]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 8; uint ty = tid / 8; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*8 + ty)*N + n0; uint offsetB0 = (idx_B*8 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M8N8K16>::get_idx(tid, stdAB, 0*stdAB); uint loadB = fragmentB<OP_T,M8N8K16>::get_idx(tid, stdAB, 8*stdAB); fragmentC<OP_N,OP_T,M8N8K16> fragC; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint4 a0 = {0}, a4 = {0}; uint4 b0 = {0}, b4 = {0}; if (N64 || n < N) { a0 = load_half8(A0 + (offsetA + N*0)); a4 = load_half8(A0 + (offsetA + N*4)); b0 = load_half8(B0 + (offsetB + N*0)); b4 = load_half8(B0 + (offsetB + N*4)); } offsetA += 64; offsetB += 64; if (!N64) n += 64; 
*(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0; *(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4; *(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0; *(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4; fragmentA<OP_N,M8N8K16> fragA; fragmentB<OP_T,M8N8K16> fragB; #pragma unroll for (uint j = 0; j < 4; j++) { fragA.load(hShare, loadA + j*16, stdAB); fragB.load(hShare, loadB + j*16, stdAB); fragC.mma_sync(fragA, fragB); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); uint storC = fragmentC<OP_N,OP_T,M8N8K16>::get_idx(tid, stdC); fragC.store(fShare, storC, stdC); C += bid*8*8 + tid*2; uint c2 = to_half2(*(float2*)&fShare[tid*2]); if (accumulate) reduce_half2(C, c2); else store_half2(C, c2); } else if (!accumulate) // gate == 0 zero_half2(C + (bid*8*8 + tid*2)); } #else // __CUDA_ARCH__ >= 700 template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x64x32_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N64, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_16x64x16_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N64, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_16x16x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, 
ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_8x64x8_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N64, bool GATED> __global__ void __launch_bounds__(32) hgemm_blocksparse_8x8x64_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } #endif // __CUDA_ARCH__ >= 700 cudaError_t hgemm_blocksparse_xn_64_sdd(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params, uint op) { dim3 grid(params->blk_a*params->blk_b, params->blk_B, params->blk_A); uint blk_N = params->blk_b * params->blk_B; //cuMemsetD16Async((CUdeviceptr)Y, 0, params->K * params->N, params->stream); if (params->locks > 0) cuMemsetD32Async((CUdeviceptr)params->Lock, 0, blk_N * params->locks * 2, params->stream); const uint2* Lut = (const uint2*)params->Lut; uint* Lock = (uint*)params->Lock; uint shared = params->shared + params->shared/2; if (params->bsize == 8) { shared += 4; if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_8x64x8_xn_sdd<OP_N,false><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_8x64x8_xn_sdd<OP_T,false><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_8x64x8_xn_sdd<OP_N, true><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else 
hgemm_blocksparse_8x64x8_xn_sdd<OP_T, true><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } else if (params->bsize == 16) { if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_16x64x16_xn_sdd<OP_N,false><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_16x64x16_xn_sdd<OP_T,false><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_16x64x16_xn_sdd<OP_N, true><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_16x64x16_xn_sdd<OP_T, true><<<grid,64,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } else if (params->bsize == 32) { // 256 = (128+4)*16*4 - (64+16 + 32+16)*32*2 shared = shared > 256 ? 
shared - 256 : 0; if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_32x64x32_xn_sdd<OP_N,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_32x64x32_xn_sdd<OP_T,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_32x64x32_xn_sdd<OP_N, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_32x64x32_xn_sdd<OP_T, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_xn_64_sdd(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_xn_64_sdd(const float* X, const float* W, float* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_nt_64_dds(const ehalf* X, const ehalf* E, ehalf* U, bsmm_params* params) { struct Plist<ehalf,8>* X8 = (struct Plist<ehalf,8>*)X; struct Plist<ehalf,8>* E8 = (struct Plist<ehalf,8>*)E; const uint2* Lut = (const uint2*)params->Lut; uint accumulate = params->beta == 1.0f; uint pcount8 = params->pcount * 8; uint N = params->N; uint loops = CEIL_DIV(N, 64); bool k64 = (N & 63) == 0; dim3 grid(params->blocks, 1, 1); if (params->bsize == 8) { if (params->Gate == 0) { if (k64) hgemm_blocksparse_8x8x64_nt_dds< true,false><<<grid,32,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_8x8x64_nt_dds<false,false><<<grid,32,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k64) hgemm_blocksparse_8x8x64_nt_dds< true, 
true><<<grid,32,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_8x8x64_nt_dds<false, true><<<grid,32,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } else if (params->bsize == 16) { if (params->Gate == 0) { if (k64) hgemm_blocksparse_16x16x64_nt_dds< true,false><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_16x16x64_nt_dds<false,false><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k64) hgemm_blocksparse_16x16x64_nt_dds< true, true><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_16x16x64_nt_dds<false, true><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } else if (params->bsize == 32) { if (params->Gate == 0) { if (k64) hgemm_blocksparse_32x32x64_nt_dds< true,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_32x32x64_nt_dds<false,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k64) hgemm_blocksparse_32x32x64_nt_dds< true, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_32x32x64_nt_dds<false, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_nt_64_dds(const bhalf* X, const bhalf* E, bhalf* U, bsmm_params* params) { return cudaSuccess; } cudaError_t hgemm_blocksparse_nt_64_dds(const float* X, const float* E, float* U, bsmm_params* params) { return cudaSuccess; } // dg = sum(dw * w, axis=1,2) template <typename T, uint BSIZE, uint THREADS> __global__ void __launch_bounds__(THREADS) 
blocksparse_gate_grad(T* DW_out, float* DG, const T* __restrict__ DW, const T* __restrict__ W, const float* __restrict__ G) { const uint U = BSIZE*BSIZE/THREADS; uint bid = blockIdx.x; uint tid = threadIdx.x; uint offset = bid*BSIZE*BSIZE + tid; float g = G[bid]; DW += offset; W += offset; DW_out += offset; float dw[U], w[U]; for (uint j = 0; j < U; j++) { dw[j] = load(DW, j*THREADS); w[j] = load( W, j*THREADS); store(DW_out, dw[j]*g, j*THREADS); } // Reduce max within this thread float dg = 0.0f; for (uint j = 0; j < U; j++) dg += ew_mul(dw[j], w[j]); // reduce within warp for (int i = 16; i > 0; i >>= 1) dg += shfl_xor(dg, i); // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid / 32] = dg; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions dg = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) dg += shfl_xor(dg, i); } } // first thread has the final reduced max_abs if (tid == 0) DG[bid] = dg; } template <typename T> bool BlocksparseGateGrad(CUstream stream, T* dw_out, float* dg, const T* dw, const T* w, const float* g, uint blocks, uint bsize) { if (bsize == 8) blocksparse_gate_grad<T, 8, 32><<<blocks, 32,0,stream>>>(dw_out, dg, dw, w, g); else if (bsize == 16) blocksparse_gate_grad<T,16, 64><<<blocks, 64,0,stream>>>(dw_out, dg, dw, w, g); else if (bsize == 32) blocksparse_gate_grad<T,32, 256><<<blocks, 256,0,stream>>>(dw_out, dg, dw, w, g); else if (bsize == 64) blocksparse_gate_grad<T,64,1024><<<blocks,1024,0,stream>>>(dw_out, dg, dw, w, g); return true; } template bool BlocksparseGateGrad<float>(CUstream stream, float* dw_out, float* dg, const float* dw, const float* w, const float* g, uint blocks, uint bsize); template bool BlocksparseGateGrad<ehalf>(CUstream stream, ehalf* dw_out, float* dg, const ehalf* dw, const ehalf* w, const float* g, uint blocks, uint bsize); 
#define MAX_NORM 0
#define L2_NORM  1

#if __CUDA_ARCH__ >= 700

// Per-block feature reduction over channel blocks of BSIZE rows.
// Grid: (ceil(N/64), C/BSIZE, 1); one warp per Plist entry (threads = params*32).
// NORM selects max-abs (MAX_NORM) or L2 (sqrt of sum of squares) reduction.
// NOTE(review): reads each Plist pointer from the kernel param space at a
// hard-coded offset (0x160 + p*8) — assumes the driver's param layout; verify
// against the launch signature if the Plist struct changes.
template <uint BSIZE, uint NORM>
__global__ void __launch_bounds__(256,4) blocksparse_feature_reduce_cn(
    const struct Plist<ehalf,8> X8, ehalf* Y, uint params, uint N)
{
    const ehalf* X;
    uint tid = threadIdx.x;
    uint bn  = blockIdx.x;
    uint bc  = blockIdx.y;
    // Each warp works on a Plist entry
    uint p  = tid / 32; // index into Plist
    uint tp = tid % 32;
    asm("ld.param.u64 %0, [%1 + 0x160];" : "=l"(X) : "r"(p * 8));
    uint tn = tp % 8;
    uint tc = tp / 8;
    uint n  = bn*64 + tn*8;

    X += (bc*BSIZE + tc)*N + n;
    Y += bc*params*N + p*N + n;
    // keep the compiler from re-deriving these addresses inside the loop
    asm("mov.b64 %0, %0;" : "+l"(X) : );
    asm("mov.b64 %0, %0;" : "+l"(Y) : );

    float8 norm;
    ew_zero(norm);
    bool n_valid = n < N; // tail guard: N need not be a multiple of 64

    #pragma unroll 4
    for (uint c = 0; c < BSIZE; c += 4)
    {
        float8 x = load((const ehalf8*)(X + c*N), 0, n_valid);
        if (NORM == MAX_NORM)
            norm = ew_maximum(ew_abs(x), norm);
        else
            norm = ew_add(ew_sqr(x), norm);
    }
    // reduce the 4 lanes that cover one channel block (stride 8..32 within warp)
    if (NORM == MAX_NORM)
    {
        #pragma unroll
        for (int i = 16; i > 4; i >>= 1)
            norm = ew_warp_max(norm, i);
    }
    else
    {
        #pragma unroll
        for (int i = 16; i > 4; i >>= 1)
            norm = ew_warp_sum(norm, i);
        norm = ew_sqrt(norm);
    }
    // only the first 8 lanes of the warp hold the final result
    store((ehalf8*)Y, norm, 0, n_valid && tp < 8);
}

// Epilogue helper for hgemm_32x32x64_nt: reduce the 4 partial sums staged in
// shared memory (stride 32 apart), scale, scrub NaN/Inf, and write/accumulate
// one output element per (j) row group. M32 elides the row bounds check when
// M is a multiple of 32.
template <bool M32, bool ACCUMULATE>
__device__ __noinline__ void store_32x32x64_nt(
    float* C, uint loadC, uint M, uint N, uint cy, uint cx, uint i, uint stdC, float scale)
{
    for (uint j = 0; j < 4; j++)
        if (M32 || cy + i*16 + j*4 < M)
        {
            float out = ew_zero_nan_inf(ew_mul(ew_add(
                ew_add(
                    ld_shared_float1(loadC + j*4*stdC + 0*32),
                    ld_shared_float1(loadC + j*4*stdC + 1*32)),
                ew_add(
                    ld_shared_float1(loadC + j*4*stdC + 2*32),
                    ld_shared_float1(loadC + j*4*stdC + 3*32))), scale));

            uint offsetC = (cy + i*16 + j*4)*N + cx;

            if (ACCUMULATE)
                atomicRed(C + offsetC, out);
            else
                store(C + offsetC, out);
        }
}

// C[32x32] = scale * A[32xK] * B[32xK]^T using m16n16k16 tensor-core fragments.
// Grid: (blk_a*blk_b, blk_B, blk_A), 128 threads. Requires SM70+.
// Rows of A/B beyond M/N and the K tail are predicated to zero inside the
// inline-PTX loads, so no out-of-bounds global reads occur.
template <bool M32, bool ACCUMULATE>
__global__ void __launch_bounds__(128,8) hgemm_32x32x64_nt(
    const ehalf* A, const ehalf* B, float* C,
    uint M, uint N, uint K, uint blk_a, uint blk_b, float scale)
{
    const uint stdAB = 72;  // 64 + 8 padding to avoid shared bank conflicts
    const uint stdC  = 132;

    __shared__ ehalf hShare[stdAB*2*32];
    float* fShare = (float*)hShare;

    uint tid    = threadIdx.x;
    uint idx_ab = blockIdx.x;
    uint idx_B  = blockIdx.y;
    uint idx_A  = blockIdx.z;

    idx_A = idx_A * blk_a + idx_ab / blk_b;
    idx_B = idx_B * blk_b + idx_ab % blk_b;

    uint tx = tid % 8;
    uint ty = tid / 8;
    uint tk = tx * 8;
    uint ta = idx_A*32 + ty;
    uint tb = idx_B*32 + ty;

    uint offsetA = ta*K + tk;
    uint offsetB = tb*K + tk;
    uint storAB  = ty*stdAB + tk;

    uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 96)/2);
    uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 96)/2 + stdAB*32);

    // persistent row-bounds predicates, combined with the k-bound per iteration
    asm(".reg .pred a00, a16, b00, b16; \n\t"
        "setp.lt.u32 a00, %0, %2; \n\t"
        "setp.lt.u32 a16, %1, %2; \n\t"
        "setp.lt.u32 b00, %3, %5; \n\t"
        "setp.lt.u32 b16, %4, %5; \n\t" ::
        "r"(ta), "r"(ta+16), "r"(M),
        "r"(tb), "r"(tb+16), "r"(N) );

    fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2];

    #pragma unroll 1
    for (uint k = 0; k < K; k += 64)
    {
        uint4 a00, a16, b00, b16;

        asm volatile("{\n\t"
            ".reg .pred ka00, ka16, kb00, kb16; \n\t"
            "setp.lt.and.u32 ka00, %20, %21, a00; \n\t"
            "setp.lt.and.u32 ka16, %20, %21, a16; \n\t"
            "setp.lt.and.u32 kb00, %20, %21, b00; \n\t"
            "setp.lt.and.u32 kb16, %20, %21, b16; \n\t"
            "mov.b64 { %0, %1}, 0; \n\t"
            "mov.b64 { %2, %3}, 0; \n\t"
            "mov.b64 { %4, %5}, 0; \n\t"
            "mov.b64 { %6, %7}, 0; \n\t"
            "mov.b64 { %8, %9}, 0; \n\t"
            "mov.b64 {%10, %11}, 0; \n\t"
            "mov.b64 {%12, %13}, 0; \n\t"
            "mov.b64 {%14, %15}, 0; \n\t"
            "@ka00 ld.global.nc.v4.u32 { %0, %1, %2, %3}, [%16]; \n\t"
            "@ka16 ld.global.nc.v4.u32 { %4, %5, %6, %7}, [%17]; \n\t"
            "@kb00 ld.global.nc.v4.u32 { %8, %9, %10, %11}, [%18]; \n\t"
            "@kb16 ld.global.nc.v4.u32 {%12, %13, %14, %15}, [%19]; \n\t"
            "}" :
            "=r"(a00.x), "=r"(a00.y), "=r"(a00.z), "=r"(a00.w),
            "=r"(a16.x), "=r"(a16.y), "=r"(a16.z), "=r"(a16.w),
            "=r"(b00.x), "=r"(b00.y), "=r"(b00.z), "=r"(b00.w),
            "=r"(b16.x), "=r"(b16.y), "=r"(b16.z), "=r"(b16.w) :
            "l"(A + (offsetA +  0*K)),
            "l"(A + (offsetA + 16*K)), // BUGFIX: comma was missing between this operand and the next
            "l"(B + (offsetB +  0*K)),
            "l"(B + (offsetB + 16*K)),
            "r"(tk), "r"(K) );

        offsetA += 64;
        offsetB += 64;
        tk      += 64;

        __syncthreads();
        *(uint4*)&hShare[storAB +  0*stdAB +  0*stdAB] = a00;
        *(uint4*)&hShare[storAB + 16*stdAB +  0*stdAB] = a16;
        *(uint4*)&hShare[storAB +  0*stdAB + 32*stdAB] = b00;
        *(uint4*)&hShare[storAB + 16*stdAB + 32*stdAB] = b16;
        __syncthreads();

        fragmentA<OP_N,M16N16K16> fragA[2];
        fragmentB<OP_T,M16N16K16> fragB[2];
        for (int i = 0; i < 2; i++)
        {
            fragA[i].load(hShare, loadA + stdAB*i*16, stdAB);
            fragB[i].load(hShare, loadB + stdAB*i*16, stdAB);
        }
        for (int i = 0; i < 2; i++)
            for (int j = 0; j < 2; j++)
                fragC[i][j].mma_sync(fragA[i], fragB[j]);
    }

    // re-materialize ids so the assembler can retire these registers in the loop
    asm volatile ("mov.u32 %0, %tid.x;"   : "=r"(tid)    :);
    asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_ab) :);
    asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B)  :);
    asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_A)  :);
    idx_A = idx_A * blk_a + idx_ab / blk_b;
    idx_B = idx_B * blk_b + idx_ab % blk_b;

    tx = tid % 32;
    ty = tid / 32;
    uint cx = idx_B*32 + tx;
    uint cy = idx_A*32 + ty;

    uint loadC = ty*stdC + tx;
    uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96));

    bool cx_valid = cx < N;
    for (int i = 0; i < 2; i++)
    {
        __syncthreads();
        for (int j = 0; j < 2; j++)
            fragC[i][j].store(fShare, storC + j*16, stdC);
        __syncthreads();

        if (cx_valid)
            store_32x32x64_nt<M32,ACCUMULATE>(C, loadC, M, N, cy, cx, i, stdC, scale);
    }
}

#else // __CUDA_ARCH__ >= 700

// Pre-Volta stubs: these kernels require tensor cores; keep a well-formed
// symbol so host code links, but do no useful work.
template <uint BSIZE, uint NORM>
__global__ void __launch_bounds__(256,4) blocksparse_feature_reduce_cn(
    const struct Plist<ehalf,8> X8, ehalf* Y, uint params, uint N)
{
    *Y = 0;
}
template <bool M32, bool ACCUMULATE>
__global__ void __launch_bounds__(128,8) hgemm_32x32x64_nt(
    const ehalf* A, const ehalf* B, float* C,
    uint M, uint N, uint K, uint blk_a, uint blk_b, float scale)
{
    *C = 0;
}

#endif // __CUDA_ARCH__ >= 700

// Host launcher: dispatch blocksparse_feature_reduce_cn on block size
// (bshift: 3 -> 8, 4 -> 16, else 32) and norm type. One warp per Plist entry.
bool BlocksparseFeatureReduceCN(CUstream stream, ehalf* Y, const struct Plist<ehalf,8>* X8,
    uint params, uint C, uint N, uint bshift, uint norm_type)
{
    dim3 grid(CEIL_DIV(N, 64), C >> bshift, 1);
    uint threads = params * 32;

    if (norm_type == MAX_NORM)
    {
        if      (bshift == 3) blocksparse_feature_reduce_cn< 8,MAX_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
        else if (bshift == 4) blocksparse_feature_reduce_cn<16,MAX_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
        else                  blocksparse_feature_reduce_cn<32,MAX_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
    }
    else
    {
        if      (bshift == 3) blocksparse_feature_reduce_cn< 8, L2_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
        else if (bshift == 4) blocksparse_feature_reduce_cn<16, L2_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
        else                  blocksparse_feature_reduce_cn<32, L2_NORM><<<grid,threads,0,stream>>>(*X8, Y, params, N);
    }
    return true;
}

// Host launcher for hgemm_32x32x64_nt. M32 template arg elides row bounds
// checks when M is a multiple of 32. scale == 0 with accumulate == 0 is a
// pure zero-fill (byte-wise memset of float zeros is valid).
bool hGemmNT(CUstream stream, const ehalf* A, const ehalf* B, float* C,
    uint M, uint N, uint K, uint blk_a, uint blk_b, uint blk_A, uint blk_B,
    uint accumulate, float scale)
{
    if (scale != 0.0f)
    {
        dim3 grid(blk_a*blk_b, blk_B, blk_A);
        if (M & 31)
        {
            if (accumulate)
                hgemm_32x32x64_nt<false, true><<<grid,128,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale);
            else
                hgemm_32x32x64_nt<false,false><<<grid,128,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale);
        }
        else
        {
            if (accumulate)
                hgemm_32x32x64_nt< true, true><<<grid,128,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale);
            else
                hgemm_32x32x64_nt< true,false><<<grid,128,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale);
        }
    }
    else if (accumulate == 0)
        cuMemsetD32Async((CUdeviceptr)C, 0, M*N, stream);
    return true;
}

#endif // GOOGLE_CUDA
the_stack
// Another possibility:
// #include <torch/all.h>

#include <assert.h>

#include <cstdint>       // uint64_t
#include <type_traits>   // std::aligned_storage

#include "type_shim.h"
#include "multi_tensor_apply.cuh"

#define BLOCK_SIZE 512
#define ILP 4

// True when `p` is sufficiently aligned for an ILP-wide vectorized
// load/store of T (i.e. a single ILP*sizeof(T)-byte transaction).
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
  return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}

// Copy ILP contiguous elements of T in one transaction by punning both
// sides to an aligned storage type. Both dst and src must satisfy
// is_aligned(); offsets are in units of ILP elements.
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
  typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
  ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}

// Generic conversion between math/storage types; the specializations below
// implement an 8-bit compressed parameter copy (the high byte of a half).
template <typename FROM_T, typename TO_T>
__device__ void convert(const FROM_T vi, TO_T& vo)
{
  vo = static_cast<TO_T>(vi);
}

template <>
__device__ void convert(const float vi, uint8_t& vo)
{
  // Keep only sign+exponent of vi (mantissa zeroed): this is half an ULP of
  // the truncation performed below, divided by 8, so adding it implements
  // round-to-nearest when the low byte of the half is dropped.
  union S
  {
    float as_float;
    int as_int;
  };
  S s;
  s.as_float = vi;
  s.as_int = s.as_int & 0xFF800000;
  union T
  {
    at::Half as_half;
    uint8_t as_byte[2];
  };
  T t;
  t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
  // NOTE(review): as_byte[1] is the high byte of the half only on
  // little-endian hosts/devices — TODO confirm if ever ported.
  vo = t.as_byte[1];
}

template <>
__device__ void convert(const uint8_t vi, float& vo)
{
  // Inverse of the float -> uint8_t compression: rebuild a half whose high
  // byte is vi and whose low byte is zero, then widen to float.
  union T
  {
    at::Half as_half;
    uint8_t as_byte[2];
  };
  T t;
  t.as_byte[0] = 0;
  t.as_byte[1] = vi;
  vo = static_cast<float>(t.as_half);
}

template <>
__device__ void convert(const at::Half vi, uint8_t& vo)
{
  // Same rounding trick as the float specialization, via a float widening.
  union S
  {
    float as_float;
    int as_int;
  };
  S s;
  s.as_float = static_cast<float>(vi);
  s.as_int = s.as_int & 0xFF800000;
  union T
  {
    at::Half as_half;
    uint8_t as_byte[2];
  };
  T t;
  t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
  vo = t.as_byte[1];
}

template <>
__device__ void convert(const uint8_t vi, at::Half& vo)
{
  union T
  {
    at::Half as_half;
    uint8_t as_byte[2];
  };
  T t;
  t.as_byte[0] = 0;
  t.as_byte[1] = vi;
  vo = t.as_half;
}

typedef enum{
  MOMENT_MODE_0 =0, // L2 regularization mode
  MOMENT_MODE_1 =1  // Decoupled weight decay mode
} adamMode_t;

// Stage 1 of distributed-optimizer LAMB: per chunk, updates the Adam
// moments m/v in place and writes the (unscaled) update term into u.
// Tensor list layout: [0]=grad (GRAD_T), [1]=param (T), [2]=m (T),
// [3]=v (T), [4]=update (MATH_T). Hyper-parameters are per-tensor arrays
// indexed by the global tensor number.
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage1Functor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<5>& tl,
    const MATH_T* per_tensor_beta1,
    const MATH_T* per_tensor_beta2,
    const MATH_T* per_tensor_beta3,
    const int* per_tensor_bias_correction,
    const int step,
    const MATH_T* per_tensor_epsilon,
    adamMode_t mode,
    const MATH_T* per_tensor_decay,
    const float grad_scale)
  {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int tensor_num = tl.start_tensor_this_launch + tensor_loc;
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    MATH_T beta1 = per_tensor_beta1[tensor_num];
    MATH_T beta2 = per_tensor_beta2[tensor_num];
    // NOTE(review): per_tensor_beta3 is accepted for interface parity but
    // beta3 is recomputed as (1 - beta1) here, matching the original code.
    MATH_T beta3 = 1 - beta1;
    MATH_T beta1_correction, beta2_correction;
    if (per_tensor_bias_correction[tensor_num] == 1) {
      // Use single-precision powf on device; the previous pow(beta1, step)
      // silently promoted to double-precision pow.
      beta1_correction = 1 - static_cast<MATH_T>(powf(static_cast<float>(beta1), (float)step));
      beta2_correction = 1 - static_cast<MATH_T>(powf(static_cast<float>(beta2), (float)step));
    } else {
      beta1_correction = (MATH_T) 1.0;
      beta2_correction = (MATH_T) 1.0;
    }
    MATH_T epsilon = per_tensor_epsilon[tensor_num];
    MATH_T decay = per_tensor_decay[tensor_num];

    GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc];
    g += chunk_idx*chunk_size;

    T* p = (T*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    T* m = (T*)tl.addresses[2][tensor_loc];
    m += chunk_idx*chunk_size;

    T* v = (T*)tl.addresses[3][tensor_loc];
    v += chunk_idx*chunk_size;

    MATH_T* u = (MATH_T*)tl.addresses[4][tensor_loc];
    u += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;

    MATH_T r_g[ILP];
    MATH_T r_p[ILP];
    MATH_T r_m[ILP];
    MATH_T r_v[ILP];
    // to make things simple, we put aligned case in a different code path.
    // BUGFIX: also require is_aligned(u) — the fast path issues a
    // vectorized load_store through u, which previously went unchecked.
    if(n % ILP == 0 &&
       chunk_size % ILP == 0 &&
       is_aligned(g) &&
       is_aligned(p) &&
       is_aligned(m) &&
       is_aligned(v) &&
       is_aligned(u))
    {
      GRAD_T l_g[ILP];
      T l_p[ILP];
      T l_m[ILP];
      T l_v[ILP];
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load
        load_store(l_g, g, 0, i_start);
        if (decay != 0)
          load_store(l_p, p, 0, i_start);
        load_store(l_m, m, 0, i_start);
        load_store(l_v, v, 0, i_start);
        // unpack
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_g[ii] = l_g[ii];
          // l_p is only loaded (and only needed) when decay != 0.
          if (decay == 0) {
            r_p[ii] = MATH_T(0);
          }
          else {
            r_p[ii] = l_p[ii];
          }
          r_m[ii] = l_m[ii];
          r_v[ii] = l_v[ii];
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          if (mode == MOMENT_MODE_0) {
            MATH_T scaled_grad = r_g[ii] / grad_scale;
            // L2 on scaled grad
            scaled_grad = scaled_grad + decay*r_p[ii];
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = next_m_unbiased / denom;
          }
          else {
            // Decoupled weight decay: decay term is added after the
            // Adam direction instead of folded into the gradient.
            MATH_T scaled_grad = r_g[ii] / grad_scale;
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
          }
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          l_m[ii] = r_m[ii];
          l_v[ii] = r_v[ii];
        }
        // store: u gets the update term; m and v are the new moments.
        load_store(u, r_p, i_start, 0);
        load_store(m, l_m, i_start, 0);
        load_store(v, l_v, i_start, 0);
      }
    }
    else
    {
      // see note in multi_tensor_scale_kernel.cu
      // (The duplicate r_* declarations that shadowed the outer arrays
      // have been removed; the outer ones are reused here.)
      for(int i_start = 0;
          i_start < n && i_start < chunk_size;
          i_start += blockDim.x*ILP)
      {
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            r_g[ii] = g[i];
            // special ?optimization? for lamb stage 1
            if (decay == 0) {
              r_p[ii] = MATH_T(0);
            }
            else {
              r_p[ii] = p[i];
            }
            r_m[ii] = m[i];
            r_v[ii] = v[i];
          } else {
            r_g[ii] = MATH_T(0);
            r_p[ii] = MATH_T(0);
            r_m[ii] = MATH_T(0);
            r_v[ii] = MATH_T(0);
          }
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          if (mode == MOMENT_MODE_0) {
            MATH_T scaled_grad = r_g[ii] / grad_scale;
            // L2 on scaled grad
            scaled_grad = scaled_grad + decay*r_p[ii];
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = next_m_unbiased / denom;
          }
          else {
            MATH_T scaled_grad = r_g[ii] / grad_scale;
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
          }
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            u[i] = r_p[ii];
            m[i] = r_m[ii];
            v[i] = r_v[ii];
          }
        }
      }
    }
  }
};

// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value, writing it back to p (type T) and to a
// compressed copy p_copy (type GRAD_T, possibly uint8 via convert()).
// Tensor list layout: [0]=update (MATH_T), [1]=param (T), [2]=param copy
// (GRAD_T).
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage2Functor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<3>& tl,
    const MATH_T* per_tensor_param_norm,
    const MATH_T* per_tensor_update_norm,
    const MATH_T learning_rate,
    const MATH_T* per_tensor_decay,
    bool use_nvlamb)
  {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int tensor_num = tl.start_tensor_this_launch + tensor_loc;
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    MATH_T decay = per_tensor_decay[tensor_num];

    MATH_T ratio = learning_rate;
    // nvlamb: apply adaptive learning rate to all parameters
    // otherwise, only apply to those with non-zero weight decay
    if (use_nvlamb || (decay != (MATH_T) 0.0))
    {
      MATH_T param_norm = per_tensor_param_norm[tensor_num];
      MATH_T update_norm = per_tensor_update_norm[tensor_num];
      // Fall back to the plain learning rate when either norm is zero.
      ratio = (update_norm != 0.0 && param_norm != 0.0) ? learning_rate * (param_norm / update_norm) : learning_rate;
    }

    MATH_T* update = (MATH_T*)tl.addresses[0][tensor_loc];
    update += chunk_idx*chunk_size;

    T* p = (T*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    GRAD_T* p_copy = (GRAD_T*)tl.addresses[2][tensor_loc];
    p_copy += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;

    // to make things simple, we put aligned case in a different code path.
    // BUGFIX: also require is_aligned(p_copy) — the fast path issues a
    // vectorized load_store through p_copy, which previously went unchecked.
    if(n % ILP == 0 &&
       chunk_size % ILP == 0 &&
       is_aligned(p) &&
       is_aligned(update) &&
       is_aligned(p_copy))
    {
      T r_p[ILP];
      MATH_T r_update[ILP];
      GRAD_T r_p_copy[ILP];
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load
        load_store(r_p, p, 0, i_start);
        load_store(r_update, update, 0, i_start);
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * r_update[ii]);
          convert(r_p[ii], r_p_copy[ii]);
        }
        load_store(p, r_p, i_start, 0);
        load_store(p_copy, r_p_copy, i_start, 0);
      }
    }
    else
    {
      for(int i_start = 0;
          i_start < n && i_start < chunk_size;
          i_start += blockDim.x*ILP)
      {
        MATH_T r_p[ILP];
        MATH_T r_update[ILP];
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            r_p[ii] = p[i];
            r_update[ii] = update[i];
          }
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            p[i] = r_p[ii];
            convert(r_p[ii], p_copy[i]);
          }
        }
      }
    }
  }
};

// Host entry point for LAMB stage 1: dispatches on the scalar types of
// param/m/v (list 1), grads (list 0) and the update buffer (list 4), then
// runs DistOptLAMBStage1Functor over all chunks via multi_tensor_apply.
void multi_tensor_lamb_compute_update_term_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor per_tensor_beta1,
  at::Tensor per_tensor_beta2,
  at::Tensor per_tensor_beta3,
  at::Tensor per_tensor_bias_correction,
  const int step,
  at::Tensor per_tensor_epsilon,
  const int mode,
  at::Tensor per_tensor_decay,
  const float grad_scale)
{
  using namespace at;

  DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_1",
    DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 1, "lamb_stage_1",
      DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
        multi_tensor_apply<5>(
          BLOCK_SIZE,
          chunk_size,
          noop_flag,
          tensor_lists,
          DistOptLAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
          per_tensor_beta1.DATA_PTR<scalar_t_2>(),
          per_tensor_beta2.DATA_PTR<scalar_t_2>(),
          per_tensor_beta3.DATA_PTR<scalar_t_2>(),
          per_tensor_bias_correction.DATA_PTR<int>(),
          step,
          per_tensor_epsilon.DATA_PTR<scalar_t_2>(),
          (adamMode_t) mode,
          per_tensor_decay.DATA_PTR<scalar_t_2>(),
          grad_scale); )))

  AT_CUDA_CHECK(cudaGetLastError());
}

// Host entry point for LAMB stage 2: applies the trust-ratio-scaled update
// to the parameters and writes the compressed parameter copy.
void multi_tensor_lamb_update_weights_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor per_tensor_param_norm,
  at::Tensor per_tensor_update_norm,
  const float learning_rate,
  at::Tensor per_tensor_decay,
  bool use_nvlamb)
{
  using namespace at;

  DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_2",
    DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[2][0].scalar_type(), 1, "lamb_stage_2",
      DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 2, "lamb_stage_2",
        multi_tensor_apply<3>(
          BLOCK_SIZE,
          chunk_size,
          noop_flag,
          tensor_lists,
          DistOptLAMBStage2Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
          per_tensor_param_norm.DATA_PTR<scalar_t_2>(),
          per_tensor_update_norm.DATA_PTR<scalar_t_2>(),
          (scalar_t_2) learning_rate,
          per_tensor_decay.DATA_PTR<scalar_t_2>(),
          use_nvlamb); )))

  AT_CUDA_CHECK(cudaGetLastError());
}
the_stack
// Test program: passing structs to CUDA kernels by value and by pointer,
// including nested structs, pointer-bearing structs, and unused arguments.
// Each test launches a tiny kernel, copies results back, prints them and
// asserts the expected values.

#include <cassert>   // assert() — was missing
#include <iostream>  // cout/endl — was missing

#include <cuda.h>

using namespace std;

struct Struct_fp_fp_f_f {
    float *p1;
    float *p2;
    float f1;
    float f2;
};

struct Struct_fp {
    float *p1;
};

struct Struct_1float {
    float f1;
};

struct Struct_2floats {
    float f1;
    float f2;
};

// Reads the scalar members of a by-value struct and writes through its
// embedded device pointers.
__global__ void struct_byvalue(struct Struct_fp_fp_f_f mystruct, float *out) {
    out[0] = mystruct.f1;
    out[1] = mystruct.f2;
    mystruct.p1[0] = 9.0f;
    mystruct.p2[0] = 10.0f;
}

void testbyvaluestruct() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));
    float *gpuFloats2;
    cudaMalloc((void**)(&gpuFloats2), N * sizeof(float));
    float *gpuFloats3;
    cudaMalloc((void**)(&gpuFloats3), N * sizeof(float));
    float *gpuOut;
    cudaMalloc((void**)(&gpuOut), N * sizeof(float));

    float *hostFloats1 = new float[N];
    float *hostFloats2 = new float[N];
    float *hostFloats3 = new float[N];
    float *hostOut = new float[N];

    struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f};
    struct_byvalue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, (float *)gpuOut);
    // Wait for the kernel BEFORE reading results back (previously the sync
    // came after the memcpys, relying on legacy default-stream blocking).
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats2[0] << endl;
    cout << hostOut[0] << endl;
    cout << hostOut[1] << endl;
    assert(hostFloats1[0] == 9);
    assert(hostFloats2[0] == 10);
    assert(hostOut[0] == 3);
    assert(hostOut[1] == 8);

    cudaFree(gpuFloats1);
    cudaFree(gpuFloats2);
    cudaFree(gpuFloats3);
    cudaFree(gpuOut);
    delete[] hostFloats1;
    delete[] hostFloats2;
    delete[] hostFloats3;
    delete[] hostOut;
    cuStreamDestroy(stream);
}

// Reads scalar members through a device-resident struct pointer.
__global__ void struct_aspointer(struct Struct_2floats *mystruct, float *out) {
    out[0] = mystruct->f1;
    out[1] = mystruct->f2;
}

void testaspointerstruct() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostOut = new float[N];
    float *gpuOut;
    cudaMalloc((void**)(&gpuOut), N * sizeof(float));

    struct Struct_2floats mystruct = { 5, 7 };
    struct Struct_2floats *gpu_mystruct;
    cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct));
    cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice);
    struct_aspointer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpuOut);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostOut[0] << endl;
    cout << hostOut[1] << endl;
    assert(hostOut[0] == 5);
    assert(hostOut[1] == 7);

    // Free device allocations (previously leaked).
    cudaFree(gpuOut);
    cudaFree(gpu_mystruct);
    delete[] hostOut;
    cuStreamDestroy(stream);
}

// Writes through pointers of two different by-value structs.
__global__ void kernel_twostructs(struct Struct_fp_fp_f_f mystruct, struct Struct_fp mystruct2) {
    mystruct.p1[0] = 9.0f;
    mystruct.p2[0] = 10.0f;
    mystruct2.p1[0] = 11.0f;
}

void testtwostructs() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));
    float *gpuFloats2;
    cudaMalloc((void**)(&gpuFloats2), N * sizeof(float));
    float *gpuFloats3;
    cudaMalloc((void**)(&gpuFloats3), N * sizeof(float));

    float *hostFloats1 = new float[N];
    float *hostFloats2 = new float[N];
    float *hostFloats3 = new float[N];

    struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 0.0f, 0.0f};
    struct Struct_fp mystruct2 = {(float *)gpuFloats3};
    kernel_twostructs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, mystruct2);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostFloats3, gpuFloats3, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats2[0] << endl;
    cout << hostFloats3[0] << endl;
    assert(hostFloats1[0] == 9);
    assert(hostFloats2[0] == 10);
    assert(hostFloats3[0] == 11);

    cudaFree(gpuFloats1);
    cudaFree(gpuFloats2);
    cudaFree(gpuFloats3);
    delete[] hostFloats1;
    delete[] hostFloats2;
    delete[] hostFloats3;
    cuStreamDestroy(stream);
}

// By-value struct with no pointer members.
__global__ void kernel_structbyval_noptrs(struct Struct_1float mystruct1, float *out) {
    if(threadIdx.x == 0) {
        out[0] = mystruct1.f1;
        out[1] = 5;
    }
}

void teststructbyvalNoPtr() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostFloats1 = new float[N];
    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));

    struct Struct_1float mystruct1 = {8.0f};
    kernel_structbyval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct1, (float *)gpuFloats1);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats1[1] << endl;
    assert(hostFloats1[0] == 8);
    assert(hostFloats1[1] == 5);

    delete[] hostFloats1;
    cudaFree(gpuFloats1);
    cuStreamDestroy(stream);
}

// Mixed: two structs by device pointer, one by value, no pointer members.
__global__ void kernel_twostructs_noptrs(struct Struct_2floats *mystruct, struct Struct_1float *mystruct2, struct Struct_1float mystruct3, float *out) {
    if(threadIdx.x == 0) {
        out[0] = mystruct->f1;
        out[1] = mystruct->f2;
        out[2] = mystruct2->f1;
        out[3] = mystruct3.f1;
    }
}

void test_twostructs_byptr_NoPtr() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostFloats1 = new float[N];
    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));

    struct Struct_2floats mystruct = {5.0f, 6.0f};
    struct Struct_1float mystruct2 = {7.0f};
    struct Struct_1float mystruct3 = {8.0f};

    struct Struct_2floats *gpu_mystruct;
    cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct));
    cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice);

    struct Struct_1float *gpu_mystruct2;
    cudaMalloc((void**)(&gpu_mystruct2), sizeof(mystruct2));
    cudaMemcpy(gpu_mystruct2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice);

    kernel_twostructs_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpu_mystruct2, mystruct3, (float *)gpuFloats1);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats1[1] << endl;
    cout << hostFloats1[2] << endl;
    cout << hostFloats1[3] << endl;
    assert(hostFloats1[0] == 5);
    assert(hostFloats1[1] == 6);
    assert(hostFloats1[2] == 7);
    assert(hostFloats1[3] == 8);

    cudaFree(gpuFloats1);
    cudaFree(gpu_mystruct);
    cudaFree(gpu_mystruct2);
    delete[] hostFloats1;
    cuStreamDestroy(stream);
}

// Two-float struct by value, no pointer members.
__global__ void kernel_struct2byval_noptrs(struct Struct_2floats mystruct1, float *out) {
    if(threadIdx.x == 0) {
        out[0] = mystruct1.f1;
        out[1] = mystruct1.f2;
    }
}

void teststruct2byvalNoPtr() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostFloats1 = new float[N];
    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));

    struct Struct_2floats mystruct1 = {8.0f, 9.0f};
    kernel_struct2byval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct1, (float *)gpuFloats1);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats1[1] << endl;
    assert(hostFloats1[0] == 8);
    assert(hostFloats1[1] == 9);

    delete[] hostFloats1;
    cudaFree(gpuFloats1);
    cuStreamDestroy(stream);
}

struct struct_f_c_f_c {
    float f1;
    char c1;
    float f2;
    char c2;
};

// Two struct pointers into the SAME device buffer at different offsets.
__global__ void kernel_twostructs_gpuside_singlebuffer(struct struct_f_c_f_c *mystruct1, struct struct_f_c_f_c *mystruct2, float *out) {
    out[0] = mystruct1->f1;
    out[1] = mystruct1->f2;
    out[2] = mystruct2->f1;
    out[3] = mystruct2->f2;
}

void test_twostructs_gpuside_singlebuffer() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostOut = new float[N];
    float *gpuOut;
    cudaMalloc((void**)(&gpuOut), N * sizeof(float));

    char *gpubuf;
    cudaMalloc((void **)&gpubuf, 1024);

    // Offsets are multiples of 8 so both structs stay suitably aligned.
    int offset1 = 24;
    int offset2 = 40;

    struct struct_f_c_f_c mystruct1 = { 5, 0, 7, 0 };
    cudaMemcpy(gpubuf + offset1, &mystruct1, sizeof(mystruct1), cudaMemcpyHostToDevice);

    struct struct_f_c_f_c mystruct2 = { 9, 0, 3, 0 };
    cudaMemcpy(gpubuf + offset2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice);

    kernel_twostructs_gpuside_singlebuffer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(
        (struct struct_f_c_f_c *)(gpubuf + offset1), (struct struct_f_c_f_c *)(gpubuf + offset2), gpuOut);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostOut[0] << endl;
    cout << hostOut[1] << endl;
    cout << hostOut[2] << endl;
    cout << hostOut[3] << endl;
    assert(hostOut[0] == 5);
    assert(hostOut[1] == 7);
    assert(hostOut[2] == 9);
    assert(hostOut[3] == 3);

    // Free device allocations (previously leaked).
    cudaFree(gpuOut);
    cudaFree(gpubuf);
    delete[] hostOut;
    cuStreamDestroy(stream);
}

struct NestL2 {
    float floats[10];
};

struct NestL1 {
    struct NestL2 n1;
    struct NestL2 n2;
};

struct NestTop {
    struct NestL1 n1;
    struct NestL1 n2;
};

// Doubly-nested struct passed by value.
__global__ void kernelUseNestTop(NestTop nest, float *out) {
    out[0] = nest.n1.n1.floats[0];
    out[1] = nest.n1.n1.floats[1];
}

void testKernelUsesNestTop() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *gpuOut;
    cudaMalloc((void**)(&gpuOut), N * sizeof(float));
    float *hostOut = new float[N];

    struct NestTop nestTop;
    nestTop.n1.n1.floats[0] = 5;
    nestTop.n1.n1.floats[1] = 7;

    kernelUseNestTop<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(nestTop, (float *)gpuOut);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostOut[0] << endl;
    cout << hostOut[1] << endl;
    assert(hostOut[0] == 5);
    assert(hostOut[1] == 7);

    cudaFree(gpuOut);
    delete[] hostOut;
    cuStreamDestroy(stream);
}

// Same as struct_byvalue but with a deliberately unused second struct
// argument (exercises "readnone" parameter handling).
__global__ void struct_byvalue_withreadnone(struct Struct_fp_fp_f_f mystruct, struct Struct_fp_fp_f_f donothing, float *out) {
    out[0] = mystruct.f1;
    out[1] = mystruct.f2;
    mystruct.p1[0] = 9.0f;
    mystruct.p2[0] = 10.0f;
}

void testbyvaluestruct_withreadnone() {
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *gpuFloats1;
    cudaMalloc((void**)(&gpuFloats1), N * sizeof(float));
    float *gpuFloats2;
    cudaMalloc((void**)(&gpuFloats2), N * sizeof(float));
    float *gpuFloats3;
    cudaMalloc((void**)(&gpuFloats3), N * sizeof(float));
    float *gpuOut;
    cudaMalloc((void**)(&gpuOut), N * sizeof(float));

    float *hostFloats1 = new float[N];
    float *hostFloats2 = new float[N];
    float *hostFloats3 = new float[N];
    float *hostOut = new float[N];

    struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f};
    struct Struct_fp_fp_f_f donothing = {(float *)0, (float *)0, 0.0f, 0.0f};
    struct_byvalue_withreadnone<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, donothing, (float *)gpuOut);
    cuStreamSynchronize(stream);
    cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << hostFloats1[0] << endl;
    cout << hostFloats2[0] << endl;
    cout << hostOut[0] << endl;
    cout << hostOut[1] << endl;
    assert(hostFloats1[0] == 9);
    assert(hostFloats2[0] == 10);
    assert(hostOut[0] == 3);
    assert(hostOut[1] == 8);

    cudaFree(gpuFloats1);
    cudaFree(gpuFloats2);
    cudaFree(gpuFloats3);
    cudaFree(gpuOut);
    delete[] hostFloats1;
    delete[] hostFloats2;
    delete[] hostFloats3;
    delete[] hostOut;
    cuStreamDestroy(stream);
}

int main(int argc, char *argv[]) {
    // Banner strings fixed: several used "\t" (tab) where "\n" + "test..."
    // was clearly intended, and two misnamed the function they announce.
    cout << "\ntestbyvaluestruct" << endl;
    testbyvaluestruct();
    cout << "\ntestaspointerstruct" << endl;
    testaspointerstruct();
    cout << "\ntesttwostructs" << endl;
    testtwostructs();
    cout << "\nteststructbyvalNoPtr" << endl;
    teststructbyvalNoPtr();
    cout << "\ntest_twostructs_byptr_NoPtr" << endl;
    test_twostructs_byptr_NoPtr();
    cout << "\nteststruct2byvalNoPtr" << endl;
    teststruct2byvalNoPtr();
    cout << "\ntest_twostructs_gpuside_singlebuffer" << endl;
    test_twostructs_gpuside_singlebuffer();
    cout << "\ntestKernelUsesNestTop" << endl;
    testKernelUsesNestTop();
    cout << "\ntestbyvaluestruct_withreadnone" << endl;
    testbyvaluestruct_withreadnone();
    return 0;
}
the_stack
// Avoid warnings in Eigen includes with CUDA compiler #pragma diag_suppress code_is_unreachable #include "camera_calibration/feature_detection/cuda_refinement_by_matching.cuh" #include <cub/cub.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include <libvis/cuda/cuda_util.h> #include <libvis/logging.h> #include <math_constants.h> #include "camera_calibration/feature_detection/cuda_util.cuh" namespace vis { struct UpdateEquationCoefficients4 { float H_0_0; float H_0_1; float H_0_2; float H_0_3; float H_1_1; float H_1_2; float H_1_3; float H_2_2; float H_2_3; float H_3_3; float b_0; float b_1; float b_2; float b_3; float cost; __forceinline__ __device__ void SetZero() { H_0_0 = 0; H_0_1 = 0; H_0_2 = 0; H_0_3 = 0; H_1_1 = 0; H_1_2 = 0; H_1_3 = 0; H_2_2 = 0; H_2_3 = 0; H_3_3 = 0; b_0 = 0; b_1 = 0; b_2 = 0; b_3 = 0; cost = 0; } }; /// Returns the pattern intensity (0 for black, 1 for white, 0.5 for ill-defined /// positions) at the given position within the pattern. The pattern is supposed /// to have endless extent, feature positions are at integer coordinates, and /// (0, 0) is supposed to correspond to a feature location. __forceinline__ __device__ float PatternIntensityAt(float x, float y, int num_star_segments) { // Have coordinates in [-0.5, 0.5]. float c_x = x - (x > 0 ? 1 : -1) * static_cast<int>(::fabs(x) + 0.5f); float c_y = y - (y > 0 ? 1 : -1) * static_cast<int>(::fabs(y) + 0.5f); if (c_x * c_x + c_y * c_y < 1e-8f) { return 0.5f; } float angle = ::atan2(c_y, c_x) - 0.5f * M_PI; if (angle < 0) { angle += 2 * M_PI; } return (static_cast<int>(num_star_segments * angle / (2 * M_PI)) % 2 == 0) ? 
1.f : 0.f; } __global__ void RefineFeatureByMatchingKernel_RenderSamples( int num_star_segments, int num_samples, CUDABuffer_<float2> samples, CUDABuffer_<float> local_pattern_tr_pixel_buffer, int window_half_size, CUDABuffer_<float> rendered_samples) { constexpr int kNumAntiAliasSamples = 16; unsigned int sample_index = blockIdx.x * blockDim.x + threadIdx.x; if (sample_index >= num_samples) { return; } // Load the homography (column-major storage, as is Eigen's default) __shared__ float h[9]; if (threadIdx.x < 9) { h[threadIdx.x] = local_pattern_tr_pixel_buffer(0, 9 * blockIdx.z + threadIdx.x); } __syncthreads(); // Loop over the anti-alias samples float sum = 0; for (int s = 0; s < kNumAntiAliasSamples; ++ s) { // Samples spread in [-0.5, 0.5], i.e., within the range of one pixel. float pixel_offset_x = window_half_size * samples(0, sample_index).x + -0.5 + 1 / 8.f + 1 / 4.f * (s % 4); float pixel_offset_y = window_half_size * samples(0, sample_index).y + -0.5 + 1 / 8.f + 1 / 4.f * (s / 4); float pattern_offset_factor = 1.f / (h[2] * pixel_offset_x + h[5] * pixel_offset_y + h[8]); float pattern_offset_x = (h[0] * pixel_offset_x + h[3] * pixel_offset_y + h[6]) * pattern_offset_factor; float pattern_offset_y = (h[1] * pixel_offset_x + h[4] * pixel_offset_y + h[7]) * pattern_offset_factor; sum += PatternIntensityAt(pattern_offset_x, pattern_offset_y, num_star_segments); } // Normalization by kNumSubpixelSamples is not necessary here since an // affine intensity transformation is optimized for later. 
rendered_samples(blockIdx.z, sample_index) = sum; } void CallRefineFeatureByMatchingKernel_RenderSamples( cudaStream_t stream, int feature_count, int num_star_segments, int num_samples, const CUDABuffer_<float2>& sample_positions, const CUDABuffer_<float>& local_pattern_tr_pixel_buffer, int window_half_size, const CUDABuffer_<float>& rendered_samples) { #define CALL_KERNEL(block_width_value) \ constexpr int block_width = block_width_value; \ dim3 grid_dim(GetBlockCount(num_samples, block_width), 1, feature_count); \ dim3 block_dim(block_width, 1, 1); \ RefineFeatureByMatchingKernel_RenderSamples \ <<<grid_dim, block_dim, 0, stream>>>( \ num_star_segments, num_samples, sample_positions, local_pattern_tr_pixel_buffer, \ window_half_size, rendered_samples); if (num_samples > 512) { CALL_KERNEL(1024); } else if (num_samples > 256) { CALL_KERNEL(512); } else if (num_samples > 128) { CALL_KERNEL(256); } else { CALL_KERNEL(128); } #undef CALL_KERNEL CHECK_CUDA_NO_ERROR(); } template <int block_width> __global__ void RefineFeatureByMatchingKernel_InitFactorAndBias( int num_samples, CUDABuffer_<float2> samples, CUDABuffer_<float> rendered_samples, cudaTextureObject_t image, float4* states, int window_half_size) { constexpr int block_height = 1; float position_x = states[blockIdx.z].x; float position_y = states[blockIdx.z].y; float sum_qp = 0; float sum_p = 0; float sum_q = 0; float sum_pp = 0; for (int sample_index = /*blockIdx.x * block_width +*/ threadIdx.x; sample_index < num_samples; sample_index += block_width) { float sample_x = position_x + window_half_size * samples(0, sample_index).x + 0.5f; // convert pixel center to pixel corner conv float sample_y = position_y + window_half_size * samples(0, sample_index).y + 0.5f; // convert pixel center to pixel corner conv // if (!ImageContainsPixelCornerConv(sample_x, sample_y, image_width, image_height)) { // return CUDART_INF_F; // } float p = tex2D<float>(image, sample_x, sample_y); float q = rendered_samples(blockIdx.z, 
// --- Tail of RefineFeatureByMatchingKernel_InitFactorAndBias (definition starts before this excerpt). ---
// Above this point, each thread accumulated per-sample partial sums over the feature's
// sample window; below, they are reduced block-wide and thread 0 solves the closed-form
// 1D least-squares fit for the affine brightness factor (states[].z) and bias (states[].w).
sample_index);
      sum_qp += q * p;
      sum_p += p;
      sum_q += q;
      sum_pp += p * p;
  }
  
  // Block-wide reduction of the four partial sums. A single CUB temp_storage buffer is
  // re-used, so a __syncthreads() is required between consecutive reductions.
  typedef cub::BlockReduce<float, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceFloat;
  __shared__ typename BlockReduceFloat::TempStorage temp_storage;
  
  // TODO: Would it be a good idea for performance to remove some of the
  //       __syncthreads below by using multiple separate shared temp_storage
  //       memory buffers instead of re-using one?
  sum_qp = BlockReduceFloat(temp_storage).Sum(sum_qp);
  __syncthreads();
  sum_p = BlockReduceFloat(temp_storage).Sum(sum_p);
  __syncthreads();
  sum_q = BlockReduceFloat(temp_storage).Sum(sum_q);
  __syncthreads();
  sum_pp = BlockReduceFloat(temp_storage).Sum(sum_pp);
  
  // CUB's BlockReduce returns the valid aggregate only to thread 0, hence the guard.
  if (threadIdx.x == 0) {
    float denominator = sum_pp - (sum_p * sum_p / num_samples);
    if (fabs(denominator) > 1e-6f) {
      /*factor*/ states[blockIdx.z].z = (sum_qp - (sum_p / num_samples) * sum_q) / denominator;
    } else {
      // Degenerate (near-constant) patch: fall back to identity factor.
      /*factor*/ states[blockIdx.z].z = 1.f;
    }
    /*bias*/ states[blockIdx.z].w = (1.f / num_samples) * (sum_q - states[blockIdx.z].z * sum_p);
  }
}

// Host-side launcher for RefineFeatureByMatchingKernel_InitFactorAndBias.
// Grid layout: one block per feature (grid.z == feature_count); block width is the
// smallest of {128, 256, 512, 1024} that is >= num_samples.
void CallRefineFeatureByMatchingKernel_InitFactorAndBias(
    cudaStream_t stream,
    int feature_count,
    int num_samples,
    const CUDABuffer_<float2>& sample_positions,
    const CUDABuffer_<float>& rendered_samples,
    cudaTextureObject_t image,
    float4* states,
    int window_half_size) {
  #define CALL_KERNEL(block_width_value) \
      constexpr int block_width = block_width_value; \
      dim3 grid_dim(1, 1, feature_count); \
      dim3 block_dim(block_width, 1, 1); \
      RefineFeatureByMatchingKernel_InitFactorAndBias<block_width> \
      <<<grid_dim, block_dim, 0, stream>>>( \
          num_samples, sample_positions, rendered_samples, \
          image, states, window_half_size);
  if (num_samples > 512) {
    CALL_KERNEL(1024);
  } else if (num_samples > 256) {
    CALL_KERNEL(512);
  } else if (num_samples > 128) {
    CALL_KERNEL(256);
  } else {
    CALL_KERNEL(128);
  }
  #undef CALL_KERNEL
  CHECK_CUDA_NO_ERROR();
}

// Accumulates, for one sample of one feature, the Gauss-Newton update-equation
// coefficients (upper triangle of the 4x4 J^T J, the gradient J^T r, and the
// unscaled cost) into *out. Optimized state: (position_x, position_y, factor, bias),
// where factor/bias form an affine brightness model against the rendered pattern.
__forceinline__ __device__ static void AddCornerRefinementAgainstPatternCostAndJacobian(
    float position_x,
    float position_y,
    float factor,
    float bias,
    int sample_index,
    int feature_index,
    const CUDABuffer_<float2>& sample_positions,
    const CUDABuffer_<float>& rendered_samples,
    float window_half_size,
    int image_width,
    int image_height,
    cudaTextureObject_t image,
    UpdateEquationCoefficients4* out) {
  float2 sample = sample_positions(0, sample_index);
  
  // Transform pixel center to pixel corner coordinates
  position_x += 0.5f;
  position_y += 0.5f;
  
  float sample_pos_x = position_x + window_half_size * sample.x;
  float sample_pos_y = position_y + window_half_size * sample.y;
  // NOTE(review): the bounds check is disabled here (unlike in
  // ComputeCornerRefinementAgainstPatternCost below); out-of-bounds samples rely on
  // the texture's addressing mode — confirm this is intended.
  // if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
  //   return CUDART_INF_F;
  // }
  
  // Manual bilinear-interpolation setup: the four neighboring texels are fetched to
  // compute the image gradient (dx, dy) at the sample position.
  int ix = static_cast<int>(::max(0.f, sample_pos_x - 0.5f));
  int iy = static_cast<int>(::max(0.f, sample_pos_y - 0.5f));
  
  float tx = ::max(0.f, ::min(1.f, sample_pos_x - 0.5f - ix));  // truncated x = trunc(cx + fx*ls.x/ls.z)
  float ty = ::max(0.f, ::min(1.f, sample_pos_y - 0.5f - iy));  // truncated y = trunc(cy + fy*ls.y/ls.z)
  
  float top_left = tex2D<float>(image, ix + 0.5f, iy + 0.5f);
  float top_right = tex2D<float>(image, ix + 1.5f, iy + 0.5f);
  float bottom_left = tex2D<float>(image, ix + 0.5f, iy + 1.5f);
  float bottom_right = tex2D<float>(image, ix + 1.5f, iy + 1.5f);
  
  float intensity = tex2D<float>(image, sample_pos_x, sample_pos_y);
  
  float dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty);
  float dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx);
  
  // Residual of the affine brightness model against the rendered pattern sample.
  float residual = factor * intensity + bias - rendered_samples(feature_index, sample_index);
  float jac_0 = factor * dx;   // Jac. wrt. position_x
  float jac_1 = factor * dy;   // Jac. wrt. position_y
  float jac_2 = intensity;     // Jac. wrt. factor
  constexpr float jac_3 = 1;   // Jac. wrt. bias
  
  // Accumulate the upper triangle of J^T J ...
  out->H_0_0 += jac_0 * jac_0;
  out->H_0_1 += jac_0 * jac_1;
  out->H_0_2 += jac_0 * jac_2;
  out->H_0_3 += jac_0 * jac_3;
  out->H_1_1 += jac_1 * jac_1;
  out->H_1_2 += jac_1 * jac_2;
  out->H_1_3 += jac_1 * jac_3;
  out->H_2_2 += jac_2 * jac_2;
  out->H_2_3 += jac_2 * jac_3;
  out->H_3_3 += jac_3 * jac_3;
  
  // ... and the gradient J^T r.
  out->b_0 += jac_0 * residual;
  out->b_1 += jac_1 * residual;
  out->b_2 += jac_2 * residual;
  out->b_3 += jac_3 * residual;
  
  // Should actually be: 0.5f * residual * residual. However, we don't care
  // about (positive) scaling here.
  out->cost += residual * residual;
}

// Computes only the (unscaled) cost contribution of one sample for a candidate state.
// Returns +infinity if the sample falls outside the image, which the caller uses to
// reject the tested update (and abort refinement of this feature).
__forceinline__ __device__ static float ComputeCornerRefinementAgainstPatternCost(
    float position_x,
    float position_y,
    float factor,
    float bias,
    int sample_index,
    int feature_index,
    const CUDABuffer_<float2>& sample_positions,
    const CUDABuffer_<float>& rendered_samples,
    float window_half_size,
    int image_width,
    int image_height,
    cudaTextureObject_t image) {
  float2 sample = sample_positions(0, sample_index);
  
  // Transform pixel center to pixel corner coordinates
  position_x += 0.5f;
  position_y += 0.5f;
  
  float sample_pos_x = position_x + window_half_size * sample.x;
  float sample_pos_y = position_y + window_half_size * sample.y;
  if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
    return CUDART_INF_F;
  }
  
  float intensity = tex2D<float>(image, sample_pos_x, sample_pos_y);
  float residual = factor * intensity + bias - rendered_samples(feature_index, sample_index);
  
  // Should actually be: 0.5f * residual * residual. However, we don't care
  // about (positive) scaling here.
  return residual * residual;
}

// Levenberg-Marquardt refinement of one feature per block (blockIdx.z selects the
// feature). The 4-parameter state (x, y, factor, bias) lives in shared memory and is
// updated by thread 0 only; all threads cooperate on the per-sample cost/Jacobian sums
// via CUB block reductions. A feature is marked failed by setting states[].x to NaN.
template <int block_width>
__global__ void
__launch_bounds__(/*maxThreadsPerBlock*/ 1024, /*minBlocksPerMultiprocessor*/ 1)
RefineFeatureByMatchingKernel_Refine(
    int num_samples,
    CUDABuffer_<float2> sample_positions,
    CUDABuffer_<float> rendered_samples,
    cudaTextureObject_t image_texture,
    float4* states,
    float* final_cost,
    int window_half_size,
    int image_width,
    int image_height) {
  constexpr int block_height = 1;
  
  __shared__ float test_position_x;
  __shared__ float test_position_y;
  __shared__ float test_factor;
  __shared__ float test_bias;
  __shared__ float test_cost_shared;
  
  typedef cub::BlockReduce<float, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceFloat;
  __shared__ typename BlockReduceFloat::TempStorage temp_storage;
  
  // Per-thread copies; only thread 0's values are ever consumed (see notes below).
  float lambda = -1;
  float last_step_squared_norm = -1;
  
  float original_position_x = states[blockIdx.z].x;
  if (::isnan(original_position_x)) {
    // Feature was already marked invalid by a previous stage.
    return;
  }
  float original_position_y = states[blockIdx.z].y;
  
  __shared__ float position_x;
  __shared__ float position_y;
  __shared__ float factor;
  __shared__ float bias;
  if (threadIdx.x == 0) {
    position_x = original_position_x;
    position_y = original_position_y;
    factor = states[blockIdx.z].z;
    bias = states[blockIdx.z].w;
  }
  
  __shared__ bool applied_update;
  
  constexpr int kMaxIterationCount = 50;
  for (int iteration = 0; iteration < kMaxIterationCount; ++ iteration) {
    // Compute cost and Jacobian
    UpdateEquationCoefficients4 coeffs;
    coeffs.SetZero();
    __syncthreads();  // for position_x/y and for BlockReduce
    for (int sample_index = /*blockIdx.x * block_width +*/ threadIdx.x;
         sample_index < num_samples;
         sample_index += block_width) {
      AddCornerRefinementAgainstPatternCostAndJacobian(
          position_x, position_y, factor, bias,
          sample_index, blockIdx.z,
          sample_positions, rendered_samples,
          window_half_size, image_width, image_height, image_texture,
          &coeffs);
    }
    
    // TODO: Test whether it would be helpful for performance to group some of
    //       these together, resulting in higher shared memory usage but less
    //       reductions and less __syncthreads(). Trying to use the whole
    //       UpdateEquationCoefficients4 struct for accumulation resulted in
    //       exceeding the available shared memory.
    coeffs.H_0_0 = BlockReduceFloat(temp_storage).Sum(coeffs.H_0_0);
    __syncthreads();
    coeffs.H_0_1 = BlockReduceFloat(temp_storage).Sum(coeffs.H_0_1);
    __syncthreads();
    coeffs.H_0_2 = BlockReduceFloat(temp_storage).Sum(coeffs.H_0_2);
    __syncthreads();
    coeffs.H_0_3 = BlockReduceFloat(temp_storage).Sum(coeffs.H_0_3);
    __syncthreads();
    coeffs.H_1_1 = BlockReduceFloat(temp_storage).Sum(coeffs.H_1_1);
    __syncthreads();
    coeffs.H_1_2 = BlockReduceFloat(temp_storage).Sum(coeffs.H_1_2);
    __syncthreads();
    coeffs.H_1_3 = BlockReduceFloat(temp_storage).Sum(coeffs.H_1_3);
    __syncthreads();
    coeffs.H_2_2 = BlockReduceFloat(temp_storage).Sum(coeffs.H_2_2);
    __syncthreads();
    coeffs.H_2_3 = BlockReduceFloat(temp_storage).Sum(coeffs.H_2_3);
    __syncthreads();
    coeffs.H_3_3 = BlockReduceFloat(temp_storage).Sum(coeffs.H_3_3);
    __syncthreads();
    coeffs.b_0 = BlockReduceFloat(temp_storage).Sum(coeffs.b_0);
    __syncthreads();
    coeffs.b_1 = BlockReduceFloat(temp_storage).Sum(coeffs.b_1);
    __syncthreads();
    coeffs.b_2 = BlockReduceFloat(temp_storage).Sum(coeffs.b_2);
    __syncthreads();
    coeffs.b_3 = BlockReduceFloat(temp_storage).Sum(coeffs.b_3);
    __syncthreads();
    coeffs.cost = BlockReduceFloat(temp_storage).Sum(coeffs.cost);
    
    // if (threadIdx.x == 0) {
    //   printf("Iteration %i | cost: %f\n", iteration, coeffs.cost);
    // }
    
    // Initialize lambda?
    // NOTE(review): the reduced coeffs are only valid in thread 0 (CUB aggregate),
    // so every thread other than 0 computes a meaningless lambda and solve below.
    // Harmless, since only thread 0's results are written to the shared test state.
    if (lambda < 0) {
      lambda = 0.001f * 0.5f * (coeffs.H_0_0 + coeffs.H_1_1 + coeffs.H_2_2 + coeffs.H_3_3);
    }
    
    applied_update = false;  // written by all threads (same value); ordered before
                             // thread 0's 'true' by the barriers inside BlockReduce
    for (int lm_iteration = 0; lm_iteration < 10; ++ lm_iteration) {
      // Levenberg-Marquardt damping of the diagonal.
      float H_0_0 = coeffs.H_0_0 + lambda;
      float H_1_1 = coeffs.H_1_1 + lambda;
      float H_2_2 = coeffs.H_2_2 + lambda;
      float H_3_3 = coeffs.H_3_3 + lambda;
      
      // Solve for the update.
      // Perform in-place Cholesky decomposition of H:
      // https://en.wikipedia.org/wiki/Cholesky_decomposition#The_Cholesky%E2%80%93Banachiewicz_and_Cholesky%E2%80%93Crout_algorithms
      // Compared to the algorithm in Wikipedia, the matrix is transposed here,
      // and zero-based indexing is used, so the formulas are:
      //
      // H_j_j = sqrtf(H_j_j - sum_{k=0}^{j-1} (H_k_j * H_k_j))           for diagonal items
      // H_j_i = ( H_j_i - sum_{k=0}^{j-1} (H_k_i * H_k_j) ) / H_j_j      for off-diagonal items
      H_0_0 = sqrtf(H_0_0);
      float H_0_1 = (coeffs.H_0_1) / H_0_0;
      H_1_1 = sqrtf(H_1_1 - H_0_1 * H_0_1);
      float H_0_2 = (coeffs.H_0_2) / H_0_0;
      float H_1_2 = (coeffs.H_1_2 - H_0_2 * H_0_1) / H_1_1;
      H_2_2 = sqrtf(H_2_2 - H_0_2 * H_0_2 - H_1_2 * H_1_2);
      float H_0_3 = (coeffs.H_0_3) / H_0_0;
      float H_1_3 = (coeffs.H_1_3 - H_0_3 * H_0_1) / H_1_1;
      float H_2_3 = (coeffs.H_2_3 - H_0_3 * H_0_2 - H_1_3 * H_1_2) / H_2_2;
      H_3_3 = sqrtf(H_3_3 - H_0_3 * H_0_3 - H_1_3 * H_1_3 - H_2_3 * H_2_3);
      
      // Solve H * x = b for x.
      //
      // (H_0_0     0     0     0)   (H_0_0 H_0_1 H_0_2 H_0_3)   (x0)   (b0)
      // (H_0_1 H_1_1     0     0) * (    0 H_1_1 H_1_2 H_1_3) * (x1) = (b1)
      // (H_0_2 H_1_2 H_2_2     0)   (    0     0 H_2_2 H_2_3)   (x2)   (b2)
      // (H_0_3 H_1_3 H_2_3 H_3_3)   (    0     0     0 H_3_3)   (x3)   (b3)
      //
      // Naming the result of the second multiplication y, we get:
      //
      // (H_0_0     0     0     0)   (y0)   (b0)
      // (H_0_1 H_1_1     0     0) * (y1) = (b1)
      // (H_0_2 H_1_2 H_2_2     0)   (y2)   (b2)
      // (H_0_3 H_1_3 H_2_3 H_3_3)   (y3)   (b3)
      //
      // and:
      //
      // (H_0_0 H_0_1 H_0_2 H_0_3)   (x0)   (y0)
      // (    0 H_1_1 H_1_2 H_1_3) * (x1) = (y1)
      // (    0     0 H_2_2 H_2_3)   (x2) = (y2)
      // (    0     0     0 H_3_3)   (x3) = (y3)
      float y0 = (coeffs.b_0) / H_0_0;
      float y1 = (coeffs.b_1 - H_0_1 * y0) / H_1_1;
      float y2 = (coeffs.b_2 - H_0_2 * y0 - H_1_2 * y1) / H_2_2;
      float y3 = (coeffs.b_3 - H_0_3 * y0 - H_1_3 * y1 - H_2_3 * y2) / H_3_3;
      
      float x3 = (y3) / H_3_3;
      float x2 = (y2 - H_2_3 * x3) / H_2_2;
      float x1 = (y1 - H_1_3 * x3 - H_1_2 * x2) / H_1_1;
      float x0 = (y0 - H_0_3 * x3 - H_0_2 * x2 - H_0_1 * x1) / H_0_0;
      
      // Test whether the update improves the cost.
      if (threadIdx.x == 0) {
        test_position_x = position_x - x0;
        test_position_y = position_y - x1;
        test_factor = factor - x2;
        test_bias = bias - x3;
      }
      __syncthreads();  // for test_<...> and BlockReduceFloat and applied_update
      
      float test_cost_local = 0;
      for (int sample_index = /*blockIdx.x * block_width +*/ threadIdx.x;
           sample_index < num_samples;
           sample_index += block_width) {
        test_cost_local += ComputeCornerRefinementAgainstPatternCost(
            test_position_x, test_position_y, test_factor, test_bias,
            sample_index, blockIdx.z,
            sample_positions, rendered_samples,
            window_half_size, image_width, image_height, image_texture);
      }
      const float test_cost = BlockReduceFloat(reinterpret_cast<typename BlockReduceFloat::TempStorage&>(temp_storage)).Sum(test_cost_local);
      
      // if (threadIdx.x == 0) {
      //   printf("  LM iteration %i | lambda: %f, x_0: %f, x_1: %f, test cost: %f\n", lm_iteration, lambda, x_0, x_1, test_cost);
      // }
      
      if (threadIdx.x == 0) {
        test_cost_shared = test_cost;
        if (test_cost < coeffs.cost) {
          // Update improves the cost: accept it and decrease the damping.
          last_step_squared_norm = x0 * x0 + x1 * x1 + x2 * x2 + x3 * x3;
          position_x = test_position_x;
          position_y = test_position_y;
          factor = test_factor;
          bias = test_bias;
          lambda *= 0.5f;
          applied_update = true;
        } else {
          // Rejected: increase damping and retry.
          lambda *= 2.f;
        }
      }
      __syncthreads();  // for applied_update, position_x/y, test_cost_shared
      
      if (::isinf(test_cost_shared)) {
        // Position went out of bounds
        // if (threadIdx.x == 0) {
        //   printf("  Position out of bounds\n");
        // }
        states[blockIdx.z].x = CUDART_NAN_F;
        return;
      }
      
      if (applied_update) {
        break;
      }
    }
    
    if (!applied_update) {
      // Cannot find an update that improves the cost. Treat this as converged.
      states[blockIdx.z] = make_float4(position_x, position_y, factor, bias);
      if (final_cost) {
        final_cost[blockIdx.z] = test_cost_shared;
      }
      return;
    }
    
    // Check for divergence.
    // NOTE(review): fabs() on float — fabsf() would avoid the double round-trip.
    if (fabs(original_position_x - position_x) >= window_half_size ||
        fabs(original_position_y - position_y) >= window_half_size) {
      // The result is probably not the originally intended corner,
      // since it is not within the original search window.
      // if (threadIdx.x == 0) {
      //   printf("  Position too far away from start. original_position_x: %f, position_x: %f, original_position_y: %f, position_y: %f, window_half_size: %i\n",
      //          original_position_x, position_x, original_position_y, position_y, window_half_size);
      // }
      states[blockIdx.z].x = CUDART_NAN_F;
      return;
    }
  }
  
  if (threadIdx.x == 0) {
    // last_step_squared_norm is only meaningful in thread 0 (set above under the
    // same guard), which is why this final decision is thread-0-only.
    if (last_step_squared_norm >= 1e-8) {
      // Not converged
      // printf("  Not converged\n");
      states[blockIdx.z].x = CUDART_NAN_F;
    } else {
      // Converged
      states[blockIdx.z] = make_float4(position_x, position_y, factor, bias);
      if (final_cost) {
        final_cost[blockIdx.z] = test_cost_shared;
      }
    }
  }
}

// Host-side launcher for RefineFeatureByMatchingKernel_Refine; same grid/block
// selection scheme as the InitFactorAndBias launcher above.
void CallRefineFeatureByMatchingKernel_Refine(
    cudaStream_t stream,
    int feature_count,
    int num_samples,
    const CUDABuffer_<float2>& sample_positions,
    const CUDABuffer_<float>& rendered_samples,
    cudaTextureObject_t image_texture,
    float4* states,
    float* final_cost,
    int window_half_size,
    int image_width,
    int image_height) {
  #define CALL_KERNEL(block_width_value) \
      constexpr int block_width = block_width_value; \
      dim3 grid_dim(1, 1, feature_count); \
      dim3 block_dim(block_width, 1, 1); \
      RefineFeatureByMatchingKernel_Refine<block_width> \
      <<<grid_dim, block_dim, 0, stream>>>( \
          num_samples, sample_positions, rendered_samples, image_texture, states, \
          final_cost, window_half_size, image_width, image_height);
  if (num_samples > 512) {
    CALL_KERNEL(1024);
  } else if (num_samples > 256) {
    CALL_KERNEL(512);
  } else if (num_samples > 128) {
    CALL_KERNEL(256);
  } else {
    CALL_KERNEL(128);
  }
  #undef CALL_KERNEL
  CHECK_CUDA_NO_ERROR();
}

}  // closes a scope opened before this excerpt (presumably a namespace — confirm)
// ---- dataset-concatenation artifact ("the_stack" separator): the content below belongs to a different file (Chrono::Gpu boundary-condition forces header) ----
#pragma once

#include "chrono_gpu/ChGpuDefines.h"
#include "chrono_gpu/physics/ChSystemGpu_impl.h"
#include "chrono_gpu/physics/ChGpuBoundaryConditions.h"
#include "chrono_gpu/cuda/ChCudaMathUtils.cuh"
#include "chrono_gpu/cuda/ChGpuHelpers.cuh"
//#include "chrono/core/ChMathematics.h"
#include <math_constants.h>

using chrono::gpu::CHGPU_TIME_INTEGRATOR;
using chrono::gpu::CHGPU_FRICTION_MODE;
using chrono::gpu::CHGPU_ROLLING_MODE;
using chrono::gpu::BC_type;
using chrono::gpu::BC_params_t;
using chrono::gpu::Sphere_BC_params_t;
using chrono::gpu::Z_Cone_BC_params_t;
using chrono::gpu::Z_Cylinder_BC_params_t;
using chrono::gpu::Plane_BC_params_t;
using chrono::gpu::ChSystemGpu_impl;

// add bc forces material based only
// Sphere boundary condition, material-based (Hertz-Mindlin style) contact model.
// Accumulates the contact force into force_from_BCs and the angular acceleration
// (tangential friction + rolling resistance) into ang_acc_from_BCs.
// Returns true iff the sphere is in contact with the BC sphere.
inline __device__ bool addBCForces_Sphere_matBased(unsigned int sphID,
                                                   unsigned int BC_id,
                                                   const int64_t3& sphPos,
                                                   const float3& sphVel,
                                                   const float3& sphOmega,
                                                   float3& force_from_BCs,
                                                   float3& ang_acc_from_BCs,
                                                   ChSystemGpu_impl::GranParamsPtr gran_params,
                                                   ChSystemGpu_impl::GranSphereDataPtr sphere_data,
                                                   BC_params_t<int64_t, int64_t3>& bc_params,
                                                   bool track_forces) {
    Sphere_BC_params_t<int64_t, int64_t3> sphere_params = bc_params.sphere_params;
    bool contact = false;
    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;
    float penetration = 0;

    // precompute the int offset
    int64_t3 delta_int = sphPos - sphere_params.sphere_center;

    // Center distance in double precision to avoid precision loss on large int64 coordinates.
    double sph_center_dist = sqrt(Dot(int64_t3_to_double3(delta_int), int64_t3_to_double3(delta_int)));
    float3 contact_normal = int64_t3_to_float3(delta_int) / sph_center_dist;

    if (sph_center_dist < (double)(sphere_params.radius + sphereRadius_SU)) {
        contact = true;
        penetration = (double)(sphere_params.radius + sphereRadius_SU) - sph_center_dist;
    }

    if (contact) {
        const float m_eff = (gran_params->sphere_mass_SU * sphere_params.mass) /
                            (gran_params->sphere_mass_SU + sphere_params.mass);

        // normal force part
        float sqrt_Rd = sqrt(penetration * (float)sphereRadius_SU);
        float Sn = 2 * gran_params->E_eff_s2w_SU * sqrt_Rd;
        float loge = (gran_params->COR_s2w_SU < EPSILON) ? log(EPSILON) : log(gran_params->COR_s2w_SU);
        float beta = loge / sqrt(loge * loge + CUDART_PI_F * CUDART_PI_F);
        float kn = (2.0 / 3.0) * Sn;
        float gn = -2 * sqrt(5.0 / 6.0) * beta * sqrt(Sn * m_eff);

        // project velocity onto the normal
        float3 v_rel = sphVel - sphere_params.sphere_velo;
        float projection = Dot(v_rel, contact_normal);

        // tangential component of relative velocity
        float3 vrel_n = projection * contact_normal;
        float3 vrel_t = v_rel - vrel_n;

        float forceN_mag = kn * penetration - gn * projection;  // damping term
        float3 force_accum = forceN_mag * contact_normal;

        // tangential force component
        unsigned int BC_histmap_label = gran_params->nSpheres + BC_id + 1;

        // tangential component without angular velocity component
        // NOTE(review): sphereRadius_SU is multiplied by sphVel here — for a surface
        // velocity term one would expect the sphere's angular velocity (sphOmega);
        // verify against the intended relative-surface-velocity formula.
        vrel_t = vrel_t + Cross(sphereRadius_SU * sphVel + sphere_params.radius * sphere_params.sphere_angularVelo,
                                contact_normal);

        // parameter force_accum as normal force, returned val as tangent force
        float3 tangent_force = computeFrictionForces_matBased(
            gran_params, sphere_data, sphID, BC_histmap_label, gran_params->static_friction_coeff_s2w,
            gran_params->E_eff_s2w_SU, gran_params->G_eff_s2w_SU, sqrt_Rd, beta, force_accum, vrel_t, contact_normal,
            m_eff);

        // TODO: use collision time to check whether or not to apply rolling friction
        // size_t contact_id = findContactPairInfo(sphere_data, gran_params, sphID, BC_histmap_label);
        // sphere_data->contact_duration[contact_id] += gran_params->stepSize_SU;
        // bool calc_rolling_fr = EvaluateRollingFriction(gran_params, gran_params->E_eff_s2w_SU, sphereRadius_SU, beta,
        //                                                m_eff, sphere_data->contact_duration[contact_id]);

        // BC sphere angular velocity scaled to the granular sphere's radius.
        float3 normalized_bc_omega = sphere_params.radius / (float)sphereRadius_SU * sphere_params.sphere_angularVelo;

        // // force_accum is normal force
        // if (calc_rolling_fr == true){
        float3 roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU,
                                               gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega,
                                               normalized_bc_omega, (float)sphereRadius_SU * contact_normal);
        // } else {
        //     roll_acc = make_float3(0.0f, 0.0f, 0.0f);
        // }

        ang_acc_from_BCs =
            ang_acc_from_BCs + (Cross(-1 * contact_normal, tangent_force) / gran_params->sphereInertia_by_r);
        ang_acc_from_BCs = ang_acc_from_BCs + roll_acc;

        force_accum = force_accum + tangent_force;
        force_from_BCs = force_from_BCs + force_accum;

        // Reaction torque on the BC sphere from tangential friction and rolling resistance.
        float3 torque_accum = Cross(contact_normal, tangent_force) * sphere_params.radius -
                              roll_acc * gran_params->sphereInertia_by_r * sphere_params.radius;

        if (track_forces) {
            // accumulate force
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
            // accumulate torque
            atomicAdd(&(bc_params.sphere_params.reaction_torques.x), torque_accum.x);
            atomicAdd(&(bc_params.sphere_params.reaction_torques.y), torque_accum.y);
            atomicAdd(&(bc_params.sphere_params.reaction_torques.z), torque_accum.z);
        }
        return true;
    }
    return false;
}

// Sphere boundary condition, frictionless (default Chrono::Gpu spring-damper model).
// Only a normal spring + damping force is applied; returns true iff in contact.
inline __device__ bool addBCForces_Sphere_frictionless(const int64_t3& sphPos,
                                                       const float3& sphVel,
                                                       float3& force_from_BCs,
                                                       ChSystemGpu_impl::GranParamsPtr gran_params,
                                                       BC_params_t<int64_t, int64_t3>& bc_params,
                                                       bool track_forces) {
    Sphere_BC_params_t<int64_t, int64_t3> sphere_params = bc_params.sphere_params;
    bool contact = false;
    // classic radius grab, this must be signed to avoid false conversions
    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    float reciplength = 0;

    // precompute the int offset
    int64_t3 delta_int = sphPos - sphere_params.sphere_center;

    {
        // TODO is double even necessary
        double3 delta = int64_t3_to_double3(delta_int) / (sphere_params.radius + sphereRadius_SU);
        double d2 = Dot(delta, delta);
        // this needs to be computed in double, then cast to float
        reciplength = (float)rsqrt(d2);
    }
    // recompute in float to be cheaper
    float3 delta = int64_t3_to_float3(delta_int) / (sphere_params.radius + sphereRadius_SU);

    float3 contact_normal = delta * reciplength;

    float penetration_over_R = 2. - 2. / reciplength;
    contact = (penetration_over_R > 0);
    // contact means d2 <1, so 1/d2 > 1, reciplength > 1, penetration_over_R > 0
    if (contact) {
        float3 force_accum = {0, 0, 0};

        float force_model_multiplier = sqrt(penetration_over_R);

        // spring term
        force_accum = force_accum + sphere_params.normal_sign * gran_params->K_n_s2w_SU * contact_normal * 0.5 *
                                        (sphere_params.radius + sphereRadius_SU) * penetration_over_R *
                                        force_model_multiplier;

        // Project relative velocity to the normal
        float3 rel_vel = sphVel - bc_params.vel_SU;
        float projection = Dot(rel_vel, contact_normal);

        // assume bc mass is infinite
        const float m_eff = gran_params->sphere_mass_SU;

        // add damping term
        force_accum = force_accum +
                      -gran_params->Gamma_n_s2w_SU * projection * contact_normal * m_eff * force_model_multiplier;

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }
    return contact;
}

/// compute frictionless cone normal forces
// NOTE: overloaded below
// Also returns the contact normal and the center-to-cone-surface distance via the
// last two out-parameters, for re-use by the frictional version.
inline __device__ bool addBCForces_ZCone_frictionless(const int64_t3& sphPos,
                                                      const float3& sphVel,
                                                      float3& force_from_BCs,
                                                      ChSystemGpu_impl::GranParamsPtr gran_params,
                                                      BC_params_t<int64_t, int64_t3>& bc_params,
                                                      bool track_forces,
                                                      float3& contact_normal,
                                                      float& dist) {
    Z_Cone_BC_params_t<int64_t, int64_t3> cone_params = bc_params.cone_params;
    bool contact = false;
    // classic radius grab, this must be signed to avoid false conversions
    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // no contact here
    if (sphPos.z >= cone_params.hmax || sphPos.z <= cone_params.hmin) {
        return false;
    }

    // Get vector from cone tip to sphere center
    // TODO are we concerned about large floats here???
    float3 sphere_pos_rel = int64_t3_to_float3(sphPos - cone_params.cone_tip);

    // NOTE that this could get ugly if Px, Py are very small
    // get point P on cone directly below sphere
    float Px = sphere_pos_rel.x;
    float Py = sphere_pos_rel.y;
    float Pz = cone_params.slope * sqrt(Px * Px + Py * Py);

    // line from tip to P
    float3 l = make_float3(Px, Py, Pz);

    // component of sphere_pos_rel tangent to cone along line l
    float3 contact_tangent = l * Dot(sphere_pos_rel, l) / Dot(l, l);

    // vector from contact point to sphere
    float3 contact_vector = sphere_pos_rel - contact_tangent;

    dist = Length(contact_vector);

    // give us a contact normal
    contact_normal = contact_vector / dist;

    // positive means we are penetrating
    float penetration = sphereRadius_SU - dist;

    contact = (penetration > 0);

    // if penetrating and the material is inside (not above or below) the cone, add forces
    if (contact) {
        float3 force_accum = {0, 0, 0};
        float force_model_multiplier = sqrt(penetration / sphereRadius_SU);

        // add spring term
        force_accum = force_accum +
                      cone_params.normal_sign * gran_params->K_n_s2w_SU * penetration * contact_normal *
                          force_model_multiplier;

        // damping term
        // Project relative velocity to the normal
        float3 rel_vel = sphVel - bc_params.vel_SU;
        float projection = Dot(rel_vel, contact_normal);

        // assume bc mass is infinite
        const float m_eff = gran_params->sphere_mass_SU;

        // Compute force updates for damping term
        force_accum = force_accum +
                      -gran_params->Gamma_n_s2w_SU * projection * contact_normal * m_eff * force_model_multiplier;

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }

    return contact;
}

// overload of above if we don't care about dist and contact normal
inline __device__ bool addBCForces_ZCone_frictionless(const int64_t3& sphPos,
                                                      const float3& sphVel,
                                                      float3& force_from_BCs,
                                                      ChSystemGpu_impl::GranParamsPtr gran_params,
                                                      BC_params_t<int64_t, int64_t3>& bc_params,
                                                      bool track_forces) {
    float3 contact_normal = {0, 0, 0};
    float dist;
    return addBCForces_ZCone_frictionless(sphPos, sphVel, force_from_BCs, gran_params, bc_params, track_forces,
                                          contact_normal, dist);
}

/// TODO check damping, adhesion
// Z-cone boundary condition with friction: normal force from the frictionless helper,
// then tangential friction and rolling resistance if friction is enabled.
inline __device__ bool addBCForces_ZCone(unsigned int sphID,
                                         unsigned int BC_id,
                                         const int64_t3& sphPos,
                                         const float3& sphVel,
                                         const float3& sphOmega,
                                         float3& force_from_BCs,
                                         float3& ang_acc_from_BCs,
                                         ChSystemGpu_impl::GranParamsPtr gran_params,
                                         ChSystemGpu_impl::GranSphereDataPtr sphere_data,
                                         BC_params_t<int64_t, int64_t3>& bc_params,
                                         bool track_forces) {
    // determine these from frictionless helper
    float3 force_accum = {0, 0, 0};
    float3 contact_normal = {0, 0, 0};

    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // distance of penetration
    float dist = 0;
    // determine whether we are in contact
    bool contact = addBCForces_ZCone_frictionless(sphPos, sphVel, force_accum, gran_params, bc_params, false,
                                                  contact_normal, dist);

    if (contact) {
        // add tangent forces
        if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
            float projection = Dot(sphVel, contact_normal);
            // Relative surface velocity at the contact point (includes sphere spin).
            float3 sphere_vel_rel = sphVel - bc_params.vel_SU - contact_normal * projection +
                                    Cross(sphOmega, -1. * dist * contact_normal);

            float force_model_multiplier = sqrt((sphereRadius_SU - dist) / sphereRadius_SU);
            unsigned int BC_histmap_label = gran_params->nSpheres + BC_id + 1;

            float3 roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU,
                                                   gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega,
                                                   make_float3(0, 0, 0), dist * contact_normal);

            // assume bc mass is infinite
            const float m_eff = gran_params->sphere_mass_SU;

            // compute tangent force
            float3 tangent_force = computeFrictionForces(
                gran_params, sphere_data, sphID, BC_histmap_label, gran_params->static_friction_coeff_s2w,
                gran_params->K_t_s2w_SU, gran_params->Gamma_t_s2w_SU, force_model_multiplier, m_eff, force_accum,
                sphere_vel_rel, contact_normal);

            ang_acc_from_BCs =
                ang_acc_from_BCs + (Cross(-1 * contact_normal, tangent_force) / gran_params->sphereInertia_by_r);
            ang_acc_from_BCs = ang_acc_from_BCs + roll_acc;
            force_accum = force_accum + tangent_force;
        }

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }

    return contact;
}

/// TODO check damping, adhesion
// Plane boundary condition, frictionless spring-damper model. Returns the
// center-to-plane distance via 'dist' for re-use by the frictional version.
inline __device__ bool addBCForces_Plane_frictionless(const int64_t3& sphPos,
                                                      const float3& sphVel,
                                                      float3& force_from_BCs,
                                                      ChSystemGpu_impl::GranParamsPtr gran_params,
                                                      BC_params_t<int64_t, int64_t3>& bc_params,
                                                      bool track_forces,
                                                      float& dist) {
    Plane_BC_params_t<int64_t3> plane_params = bc_params.plane_params;
    bool contact = false;
    // classic radius grab, this must be signed to avoid false conversions
    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // Vector from point on plane to sphere center
    float3 delta_r = int64_t3_to_float3(sphPos - plane_params.position);

    // projection displacement onto plane normal
    dist = Dot(plane_params.normal, delta_r);
    // positive implies radius is bigger than distance, so there is penetration
    float penetration = sphereRadius_SU - dist;
    contact = (penetration > 0);

    if (contact) {
        float3 force_accum = {0, 0, 0};
        float3 contact_normal = plane_params.normal;
        float force_model_multiplier = sqrt(penetration / sphereRadius_SU);
        force_accum = gran_params->K_n_s2w_SU * penetration * contact_normal;

        // point of contact
        float3 ct_point = (int64_t3_to_float3)(sphPos)-contact_normal * (float)(sphereRadius_SU);
        // Plane velocity at the contact point (translation + rotation about rotation_center).
        float3 bc_velo = bc_params.vel_SU + Cross(plane_params.angular_acc,
                                                  (ct_point - (int64_t3_to_float3)(plane_params.rotation_center)));
        float3 rel_vel = sphVel - bc_velo;

        // project velocity onto the normal
        float projection = Dot(rel_vel, contact_normal);

        // assume bc mass is infinite
        const float m_eff = gran_params->sphere_mass_SU;

        // damping term
        force_accum = force_accum + -1. * gran_params->Gamma_n_s2w_SU * projection * contact_normal * m_eff;
        force_accum = force_accum * force_model_multiplier;

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }
    return contact;
}

/// LULUTODO: material_based model
// Plane boundary condition, frictionless, material-based (Hertzian) normal force.
// Also returns sqrt_Rd and beta so the frictional caller can feed them to
// computeFrictionForces_matBased without recomputing.
inline __device__ bool addBCForces_Plane_frictionless_mbased(const int64_t3& sphPos,
                                                             const float3& sphVel,
                                                             float3& force_from_BCs,
                                                             ChSystemGpu_impl::GranParamsPtr gran_params,
                                                             BC_params_t<int64_t, int64_t3>& bc_params,
                                                             bool track_forces,
                                                             float& dist,
                                                             float& sqrt_Rd,
                                                             float& beta) {
    Plane_BC_params_t<int64_t3> plane_params = bc_params.plane_params;
    bool contact = false;
    // classic radius grab, this must be signed to avoid false conversions
    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // Vector from point on plane to sphere center
    float3 delta_r = int64_t3_to_float3(sphPos - plane_params.position);

    // projection displacement onto plane normal
    dist = Dot(plane_params.normal, delta_r);
    // positive implies radius is bigger than distance, so there is penetration
    float penetration = sphereRadius_SU - dist;
    contact = (penetration > 0);

    if (contact) {
        float3 force_accum = {0, 0, 0};

        // assume bc mass is infinite
        const float m_eff = gran_params->sphere_mass_SU;

        // normal force part
        sqrt_Rd = sqrt(penetration * sphereRadius_SU);
        float Sn = 2 * gran_params->E_eff_s2w_SU * sqrt_Rd;
        float loge = (gran_params->COR_s2w_SU < EPSILON) ? log(EPSILON) : log(gran_params->COR_s2w_SU);
        beta = loge / sqrt(loge * loge + CUDART_PI_F * CUDART_PI_F);

        float kn = (2.0 / 3.0) * Sn;
        float gn = -2 * sqrt(5.0 / 6.0) * beta * sqrt(Sn * m_eff);

        float3 contact_normal = plane_params.normal;
        float3 ct_point = (int64_t3_to_float3)(sphPos)-contact_normal * (float)(sphereRadius_SU);
        float3 bc_velo = bc_params.vel_SU + Cross(plane_params.angular_acc,
                                                  (ct_point - (int64_t3_to_float3)(plane_params.rotation_center)));
        float3 rel_vel = sphVel - bc_velo;

        // project velocity onto the normal
        float projection = Dot(rel_vel, contact_normal);

        float forceN_mag = kn * penetration - gn * projection;  // damping term
        force_accum = forceN_mag * contact_normal;

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }
    return contact;
}

/// overload of above in case we don't care about dist
inline __device__ bool addBCForces_Plane_frictionless(const int64_t3& sphPos,
                                                      const float3& sphVel,
                                                      float3& force_from_BCs,
                                                      ChSystemGpu_impl::GranParamsPtr gran_params,
                                                      BC_params_t<int64_t, int64_t3>& bc_params,
                                                      bool track_forces) {
    float dist;
    return addBCForces_Plane_frictionless(sphPos, sphVel, force_from_BCs, gran_params, bc_params, track_forces, dist);
}

/// overload of above in case we don't care about dist, sqrt_Rd and beta
inline __device__ bool addBCForces_Plane_frictionless_mbased(const int64_t3& sphPos,
                                                             const float3& sphVel,
                                                             float3& force_from_BCs,
                                                             ChSystemGpu_impl::GranParamsPtr gran_params,
                                                             BC_params_t<int64_t, int64_t3>& bc_params,
                                                             bool track_forces) {
    float dist, sqrt_Rd, beta;
    return addBCForces_Plane_frictionless_mbased(sphPos, sphVel, force_from_BCs, gran_params, bc_params, track_forces,
                                                 dist, sqrt_Rd, beta);
}

// Decides whether rolling friction should be applied for a contact, based on the
// elapsed contact duration versus an estimated collision time from a simplified
// spring-damper model. Returns true once the contact has outlasted the estimated
// collision time (i.e., it is an enduring contact, not a brief impact).
inline __device__ bool EvaluateRollingFriction(ChSystemGpu_impl::GranParamsPtr gran_params,
                                               const float& E_eff,
                                               const float& R_eff,
                                               const float& beta,
                                               const float& m_eff,
                                               const float& time_contact) {
    float kn_simple = 4.f / 3.f * E_eff * sqrtf(R_eff);
    float gn_simple = -2.f * sqrtf(5.f / 3.f * m_eff * E_eff) * beta * powf(R_eff, 1.f / 4.f);

    float d_coeff = gn_simple / (2.f * sqrtf(kn_simple * m_eff));

    if (d_coeff < 1) {
        // Underdamped: a finite collision time exists.
        float t_collision = CUDART_PI_F * sqrtf(m_eff / (kn_simple * (1.f - d_coeff * d_coeff)));
        if (time_contact <= t_collision * powf(gran_params->LENGTH_UNIT, 0.25f)) {
            return false;
        }
    }
    return true;
}

// Plane boundary condition with friction. Dispatches the normal force to the
// material-based or spring-damper frictionless helper depending on
// gran_params->use_mat_based, then adds tangential friction and rolling resistance.
inline __device__ bool addBCForces_Plane(unsigned int sphID,
                                         unsigned int BC_id,
                                         const int64_t3& sphPos,
                                         const float3& sphVel,
                                         const float3& sphOmega,
                                         float3& force_from_BCs,
                                         float3& ang_acc_from_BCs,
                                         ChSystemGpu_impl::GranParamsPtr gran_params,
                                         ChSystemGpu_impl::GranSphereDataPtr sphere_data,
                                         BC_params_t<int64_t, int64_t3>& bc_params,
                                         bool track_forces) {
    float3 force_accum = {0, 0, 0};
    float3 contact_normal = bc_params.plane_params.normal;

    const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    float dist = 0;
    float beta;
    float sqrt_Rd;

    bool contact;
    if (gran_params->use_mat_based == true) {
        contact = addBCForces_Plane_frictionless_mbased(sphPos, sphVel, force_accum, gran_params, bc_params, false,
                                                        dist, sqrt_Rd, beta);
    } else {
        contact = addBCForces_Plane_frictionless(sphPos, sphVel, force_accum, gran_params, bc_params, false, dist);
    }

    // if we had normal forces, and friction is on, compute tangential forces
    if (contact) {
        float3 ct_point = (int64_t3_to_float3)(sphPos)-contact_normal * (float)(sphereRadius_SU);
        // Plane velocity at the contact point (rotational part only here).
        float3 bc_velo = Cross(bc_params.plane_params.angular_acc,
                               (ct_point - (int64_t3_to_float3)(bc_params.plane_params.rotation_center)));
        // float penetration = sphereRadius_SU - dist;

        float projection = Dot(sphVel - bc_velo, contact_normal);
        float3 rel_vel = sphVel - bc_velo - contact_normal * projection + Cross(sphOmega, -1. * dist * contact_normal);

        // add tangent forces
        if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) {
            unsigned int BC_histmap_label = gran_params->nSpheres + BC_id + 1;

            // assume bc mass is infinite
            const float m_eff = gran_params->sphere_mass_SU;

            // compute tangent force
            float3 tangent_force;
            float3 roll_acc;
            if (gran_params->use_mat_based == true) {
                tangent_force = computeFrictionForces_matBased(
                    gran_params, sphere_data, sphID, BC_histmap_label, gran_params->static_friction_coeff_s2w,
                    gran_params->E_eff_s2w_SU, gran_params->G_eff_s2w_SU, sqrt_Rd, beta, force_accum, rel_vel,
                    contact_normal, m_eff);

                // Track contact duration to gate rolling friction (see EvaluateRollingFriction).
                size_t contact_id = findContactPairInfo(sphere_data, gran_params, sphID, BC_histmap_label);
                sphere_data->contact_duration[contact_id] += gran_params->stepSize_SU;

                bool calc_rolling_fr = EvaluateRollingFriction(gran_params, gran_params->E_eff_s2w_SU, sphereRadius_SU,
                                                               beta, m_eff, sphere_data->contact_duration[contact_id]);

                if (calc_rolling_fr == true) {
                    roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU,
                                                    gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega,
                                                    make_float3(0, 0, 0), dist * contact_normal);
                } else {
                    roll_acc = make_float3(0.0f, 0.0f, 0.0f);
                }

                // write normal and tangential force for recording
                if (gran_params->recording_contactInfo == true) {
                    sphere_data->normal_contact_force[contact_id] = force_accum;
                    sphere_data->tangential_friction_force[contact_id] = tangent_force;
                }
            } else {
                float penetration = sphereRadius_SU - dist;
                float force_model_multiplier = sqrt(penetration / sphereRadius_SU);

                tangent_force = computeFrictionForces(gran_params, sphere_data, sphID, BC_histmap_label,
                                                      gran_params->static_friction_coeff_s2w, gran_params->K_t_s2w_SU,
                                                      gran_params->Gamma_t_s2w_SU, force_model_multiplier, m_eff,
                                                      force_accum, rel_vel, contact_normal);

                roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU,
                                                gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega,
                                                make_float3(0, 0, 0), dist * contact_normal);
            }

            ang_acc_from_BCs =
                ang_acc_from_BCs + (Cross(-1 * contact_normal, tangent_force) / gran_params->sphereInertia_by_r);
            ang_acc_from_BCs = ang_acc_from_BCs + roll_acc;

            force_accum = force_accum + tangent_force;
        }

        force_from_BCs = force_from_BCs + force_accum;

        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }

    return contact;
}

// Z-aligned cylinder boundary condition, frictionless spring-damper model. Both the
// inner and outer cylinder surfaces are handled via the abs() in the penetration
// computation; dist is only written when in contact.
inline __device__ bool addBCForces_Zcyl_frictionless(const int64_t3& sphPos,
                                                     const float3& sphVel,
                                                     float3& force_from_BCs,
                                                     ChSystemGpu_impl::GranParamsPtr gran_params,
                                                     BC_params_t<int64_t, int64_t3>& bc_params,
                                                     bool track_forces,
                                                     float3& contact_normal,
                                                     float& dist) {
    Z_Cylinder_BC_params_t<int64_t, int64_t3> cyl_params = bc_params.cyl_params;
    bool contact = false;
    // classic radius grab
    signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // Radial vector from cylinder center to sphere center, along inward direction
    float3 delta_r = make_float3(cyl_params.center.x - sphPos.x, cyl_params.center.y - sphPos.y, 0.f);
    float dist_delta_r = Length(delta_r);

    // directional normal
    contact_normal = cyl_params.normal_sign * delta_r / dist_delta_r;

    // get penetration into cylinder
    float penetration = sphereRadius_SU - abs(cyl_params.radius - dist_delta_r);
    contact = (penetration > 0);

    // if penetrating and the material is inside the cylinder, add forces
    if (contact) {
        dist = cyl_params.radius - dist_delta_r;
        float force_model_multiplier = sqrt(penetration / sphereRadius_SU);

        // add spring term
        float3 force_accum = gran_params->K_n_s2w_SU * penetration * contact_normal * force_model_multiplier;

        // damping term
        // Compute force updates for damping term
        // Project relative velocity to the normal
        // assume static BC
        float3 rel_vel = {sphVel.x - bc_params.vel_SU.x, sphVel.y - bc_params.vel_SU.y, 0};
        float projection = Dot(rel_vel, contact_normal);

        // assume bc mass is infinite
        const float m_eff = gran_params->sphere_mass_SU;

        force_accum = force_accum +
                      -gran_params->Gamma_n_s2w_SU * projection * contact_normal * m_eff * force_model_multiplier;

        force_from_BCs = force_from_BCs + force_accum;
        if (track_forces) {
            atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x);
            atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y);
            atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z);
        }
    }

    return contact;
}

// Z-aligned cylinder boundary condition, frictionless, material-based normal force.
// NOTE: this definition continues past the end of this excerpt.
inline __device__ bool addBCForces_Zcyl_frictionless_mbased(const int64_t3& sphPos,
                                                            const float3& sphVel,
                                                            float3& force_from_BCs,
                                                            ChSystemGpu_impl::GranParamsPtr gran_params,
                                                            BC_params_t<int64_t, int64_t3>& bc_params,
                                                            bool track_forces,
                                                            float& dist,
                                                            float& sqrt_Rd,
                                                            float& beta) {
    Z_Cylinder_BC_params_t<int64_t, int64_t3> cyl_params = bc_params.cyl_params;
    bool contact = false;
    // classic radius grab
    signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU;

    // Radial vector from cylinder center to sphere center, along inward direction
    float3 delta_r = make_float3(cyl_params.center.x - sphPos.x, cyl_params.center.y - sphPos.y, 0.f);
    float dist_delta_r = Length(delta_r);

    // directional normal
    float3 contact_normal = cyl_params.normal_sign * delta_r / dist_delta_r;

    // get penetration into cylinder
    float penetration = sphereRadius_SU - abs(cyl_params.radius - dist_delta_r);
    contact = (penetration > 0);

    // if penetrating and the material is inside the cylinder, add forces
    if (contact) {
        dist = cyl_params.radius - dist_delta_r;
        const float m_eff = gran_params->sphere_mass_SU;

        // normal force part
        sqrt_Rd = sqrt(penetration * sphereRadius_SU);
        float Sn = 2 *
gran_params->E_eff_s2w_SU * sqrt_Rd; float loge = (gran_params->COR_s2w_SU < EPSILON) ? log(EPSILON) : log(gran_params->COR_s2w_SU); beta = loge / sqrt(loge * loge + CUDART_PI_F * CUDART_PI_F); float kn = (2.0 / 3.0) * Sn; float gn = -2 * sqrt(5.0 / 6.0) * beta * sqrt(Sn * m_eff); // project velocity onto the normal float projection = Dot(sphVel, contact_normal); float forceN_mag = kn * penetration - gn * projection; // damping term float3 force_accum = forceN_mag * contact_normal; force_from_BCs = force_from_BCs + force_accum; if (track_forces) { atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x); atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y); atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z); } } return contact; } /// minimal overload for dist and contact_normal params inline __device__ bool addBCForces_Zcyl_frictionless(const int64_t3& sphPos, const float3& sphVel, float3& force_from_BCs, ChSystemGpu_impl::GranParamsPtr gran_params, BC_params_t<int64_t, int64_t3>& bc_params, bool track_forces) { float3 contact_normal = {0, 0, 0}; float dist; return addBCForces_Zcyl_frictionless(sphPos, sphVel, force_from_BCs, gran_params, bc_params, track_forces, contact_normal, dist); } /// TODO check damping, adhesion inline __device__ bool addBCForces_Zcyl(unsigned int sphID, unsigned int BC_id, const int64_t3& sphPos, const float3& sphVel, const float3& sphOmega, float3& force_from_BCs, float3& ang_acc_from_BCs, ChSystemGpu_impl::GranParamsPtr gran_params, ChSystemGpu_impl::GranSphereDataPtr sphere_data, BC_params_t<int64_t, int64_t3>& bc_params, bool track_forces) { float3 force_accum = {0, 0, 0}; float3 contact_normal = {0, 0, 0}; const signed int sphereRadius_SU = (signed int)gran_params->sphereRadius_SU; float dist = 0; float beta = 0; float sqrt_Rd = 0; bool contact; if (gran_params->use_mat_based == true) { contact = addBCForces_Zcyl_frictionless_mbased(sphPos, sphVel, force_accum, gran_params, bc_params, track_forces, dist, sqrt_Rd, beta); } 
else { contact = addBCForces_Zcyl_frictionless(sphPos, sphVel, force_accum, gran_params, bc_params, false, contact_normal, dist); } // if we had normal forces, and friction is on, compute tangential forces if (contact) { float penetration = sphereRadius_SU - dist; float projection = Dot(sphVel, contact_normal); float3 rel_vel = sphVel - contact_normal * projection + Cross(sphOmega, -1. * dist * contact_normal); // add tangent forces if (gran_params->friction_mode != chrono::gpu::CHGPU_FRICTION_MODE::FRICTIONLESS) { unsigned int BC_histmap_label = gran_params->nSpheres + BC_id + 1; // assume bc mass is infinite const float m_eff = gran_params->sphere_mass_SU; // compute tangent force float3 tangent_force = {0.f, 0.f, 0.f}; float3 roll_acc = {0.f, 0.f, 0.f}; if (gran_params->use_mat_based == true) { tangent_force = computeFrictionForces_matBased( gran_params, sphere_data, sphID, BC_histmap_label, gran_params->static_friction_coeff_s2w, gran_params->E_eff_s2w_SU, gran_params->G_eff_s2w_SU, sqrt_Rd, beta, force_accum, rel_vel, contact_normal, m_eff); size_t contact_id = findContactPairInfo(sphere_data, gran_params, sphID, BC_histmap_label); sphere_data->contact_duration[contact_id] += gran_params->stepSize_SU; bool calc_rolling_fr = EvaluateRollingFriction(gran_params, gran_params->E_eff_s2w_SU, sphereRadius_SU, beta, m_eff, sphere_data->contact_duration[contact_id]); if (calc_rolling_fr == true) { roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU, gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega, make_float3(0, 0, 0), dist * contact_normal); } else { roll_acc = make_float3(0.0f, 0.0f, 0.0f); } } else { float penetration = sphereRadius_SU - dist; float force_model_multiplier = sqrt(penetration / sphereRadius_SU); roll_acc = computeRollingAngAcc(sphere_data, gran_params, gran_params->rolling_coeff_s2w_SU, gran_params->spinning_coeff_s2w_SU, force_accum, sphOmega, make_float3(0, 0, 0), dist * contact_normal); // compute 
tangent force tangent_force = computeFrictionForces(gran_params, sphere_data, sphID, BC_histmap_label, gran_params->static_friction_coeff_s2w, gran_params->K_t_s2w_SU, gran_params->Gamma_t_s2w_SU, force_model_multiplier, m_eff, force_accum, rel_vel, contact_normal); } ang_acc_from_BCs = ang_acc_from_BCs + (Cross(-1 * contact_normal, tangent_force) / gran_params->sphereInertia_by_r); ang_acc_from_BCs = ang_acc_from_BCs + roll_acc; force_accum = force_accum + tangent_force; } force_from_BCs = force_from_BCs + force_accum; if (track_forces) { atomicAdd(&(bc_params.reaction_forces.x), -force_accum.x); atomicAdd(&(bc_params.reaction_forces.y), -force_accum.y); atomicAdd(&(bc_params.reaction_forces.z), -force_accum.z); } } return contact; }
the_stack
#include <cuda.h>

/**
 * @file
 * @section Description
 *
 * This file contains the virg_vm_gpu() function and virginia_gpu(), the cuda
 * kernel virtual machine. This is the only file that is compiled with nvcc,
 * rather than gcc or icc.
 */

extern "C" {
#include "virginian.h"
}

// defining VIRG_NOTWOSTEP disables the two step result writing procedure that
// is efficient for mapped memory
#ifdef VIRG_NOTWOSTEP
#define VIRG_NOTWOSTEP 1
#else
#define VIRG_NOTWOSTEP 0
#endif

/// Used in testing to return size test array from gpu-compiled code
const size_t *virg_gpu_getsizes()
{
	return &virg_testsizes[0];
}

/// GPU constant memory variable to hold the virtual machine execution context
__constant__ virg_vm vm;

/**
 * GPU constant memory array to hold the tablet meta information for each of
 * the tablet slots allocated on the GPU
 */
__constant__ virg_tablet_meta meta[VIRG_GPU_TABLETS];

/// total number of result rows output during mapped memory vm execution
__device__ unsigned row_counter;

/// total number of result rows that have reached global memory
__device__ unsigned rowbuff_counter;

/// counter incremented by threadblocks when they write
__device__ unsigned threadblock_order;

// forward declaration of the virtual machine kernel defined below
__global__ void virginia_gpu(unsigned tab_slot, unsigned res_slot, void* tab_, void* res_,
	unsigned start_row, unsigned num_rows, void *scratch);

/**
 * @ingroup vm
 * @brief Execute the data-parallel portion of an opcode program on a GPU
 *
 * This function handles setting up and launching the virtual machine kernel
 * that executes the virtual machine on the GPU. The user has a choice between
 * serial kernel executions for each tablet, streaming executions with
 * overlapping memcpys, and mapped execution, which is currently the fastest
 * option.
 *
 * If streaming and memory mapping are both disabled in the virginian struct,
 * then kernels will be processed on the GPU serially.
This means that the data
 * tablet will be transferred to GPU memory, the virtual machine executed, then
 * the result tablet transferred back for each tablet with no overlap. This is
 * the slowest method of GPU execution, and it does not require pinned memory.
 *
 * If streaming is enabled, regardless of the mapped memory setting, then it is
 * used. This works by allocating a fixed number of tablet streams, set equal to
 * half of the number of allocated GPU tablet slots, and overlapping data
 * transfer, kernel execution, and result transfer for tablets. Each stream gets
 * a data and result tablet slot on the GPU, and the loop iterates through each
 * stream in a round-robin fashion executing virtual machines on tablets. Note
 * that when we loop back and re-use streams then we block to wait for the
 * execution of the previous asynchronous launches. On Tesla C1060 hardware you
 * cannot overlap transfers to the GPU with transfers from it, and thus streaming
 * is about as fast as serial execution. Also note that on this hardware the
 * semantics for asynchronous operations are somewhat frustrating. Based on
 * informal tests, it appears that even though asynchronous operations are
 * completely asynchronous with respect to the calling thread, the order in
 * which they are added to streams has an effect on when they are run. Thus,
 * there are non-optimal orderings of memory copies and kernel executions when
 * multiple asynchronous operations are queued in multiple streams. Even with
 * smarter ordering, however, the inability of current hardware to transfer both
 * ways across the PCI bus simultaneously means that this method of execution is
 * currently only useful if either the data or results of a query are resident
 * in GPU memory for the entire processing operation. For some reason I could
 * only create 6 streams at a time during my testing, so you may encounter
 * problems if you allocate more than 12 gpu tablet slots.
* * Mapped memory is currently the fastest method for GPU processing by far. It * is also much simpler, since we have fewer memory copies and we don't have to * manage multiple streams at the same time. The tablet row counter is * implemented as a separate variable for mapped memory, since it would be very * very expensive to perform atomic operations on a mapped location. * * @param v Pointer to the state struct of the database system * @param vm Pointer to the context struct of the virtual machine * @param tab Pointer to the pointer to the current data tablet to process * @param res Pointer to the pointer to the current result tablet * @param num_tablets Number of tablets to process on the GPU, 0 if as many as * possible * @return VIRG_SUCCESS or VIRG_FAIL depending on errors during the function * call */ int virg_vm_gpu(virginian *v, virg_vm *vm_, virg_tablet_meta **tab, virg_tablet_meta **res, unsigned num_tablets) { unsigned proced = 0; //num_tablets = 5; VIRG_CHECK(v->threads_per_block != VIRG_THREADSPERBLOCK, "Cannot change compile-time threads per block"); VIRG_CHECK(v->threads_per_block == v->threads_per_block & 0xFFFFFFC0, "Threads per block must be a multiple of 64"); // execute GPU kernels in serial with no overlapping memory copies if(v->use_stream == 0 && v->use_mmap == 0) { // copy virtual machine context to constant memory cudaMemcpyToSymbol((char*)&vm, (char*)vm_, sizeof(virg_vm), 0, cudaMemcpyHostToDevice); VIRG_CUDCHK("serial const memcpy 1"); // copy result meta information to constant memory // we only need to do this once for multiple kernel calls because the // information about the column spacing is identical between all result // tablets cudaMemcpyToSymbol((char*)&meta, (char*)res[0], sizeof(virg_tablet_meta), sizeof(virg_tablet_meta), cudaMemcpyHostToDevice); VIRG_CUDCHK("serial const memcpy 2"); void *tab_slot = v->gpu_slots; void *res_slot = (char*)v->gpu_slots + VIRG_TABLET_SIZE; // create timers cudaEvent_t start, data, kernel, 
results; cudaEventCreate(&start); cudaEventCreate(&data); cudaEventCreate(&kernel); cudaEventCreate(&results); vm_->timing1 = 0; vm_->timing2 = 0; vm_->timing3 = 0; while(1) { VIRG_CUDCHK("const clear"); VIRG_CUDCHK("before serial const 2 memcpy"); // copy data tablet meta information to constant memory cudaMemcpyToSymbol((char*)&meta, (char*)tab[0], sizeof(virg_tablet_meta), 0, cudaMemcpyHostToDevice); VIRG_CUDCHK("serial const 2 memcpy"); // round threadblocks up given number of rows to process and threads // per block unsigned rows = tab[0]->rows; int blocks = (rows + v->threads_per_block - 1) / v->threads_per_block; assert(blocks < 65536); // start timer cudaEventRecord(start, 0); // copy entire data tablet to GPU memory cudaMemcpy(tab_slot, (char*)tab[0], tab[0]->size, cudaMemcpyHostToDevice); // copy res meta information to GPU memory, where the rows element // will be updated as result rows are output cudaMemcpy(res_slot, (char*)res[0], sizeof(virg_tablet_meta), cudaMemcpyHostToDevice); VIRG_CUDCHK("data memcpy"); #ifdef VIRG_DEBUG cudaMemset((char*)res_slot + sizeof(virg_tablet_meta), 0xDEADBEEF, VIRG_TABLET_SIZE - sizeof(virg_tablet_meta)); #endif // record we're done with data transfer cudaEventRecord(data, 0); virg_timer_start(); // kernel launch void* tab_arg = v->gpu_slots; void* res_arg = (char*)v->gpu_slots + VIRG_TABLET_SIZE; virginia_gpu<<<blocks, v->threads_per_block>>> (0, 1, tab_arg, res_arg, 0, 0, NULL); cudaDeviceSynchronize(); VIRG_CUDCHK("Single kernel launch"); // record we're done with the kernel call cudaEventRecord(kernel, 0); // transfer result tablet back from GPU memory cudaMemcpy((char*)res[0], res_slot, VIRG_TABLET_SIZE, cudaMemcpyDeviceToHost); //virg_print_tablet_meta(res[0]); // record we're done with the results transfer cudaEventRecord(results, 0); // output timing results for this tablet float f[3]; cudaEventElapsedTime(&f[0], start, data); cudaEventElapsedTime(&f[1], data, kernel); cudaEventSynchronize(results); 
cudaEventElapsedTime(&f[2], kernel, results); //fprintf(stderr, "serial block %u: %f %f %f %f\n", proced, cum, f[0], f[1], f[2]); vm_->timing1 += f[0]; vm_->timing2 += f[1]; vm_->timing3 += f[2]; proced++; // if we've processed enough tablets exit the loop if(tab[0]->last_tablet || (num_tablets != 0 && proced >= num_tablets)) break; // load next data tablet virg_db_loadnext(v, tab); // if this tablet has no rows, break from this loop // this occurs when a new data tablet is created during an insert // operation but no rows have been added to it yet if(tab[0]->rows == 0) break; // safely allocate next result tablet virg_tablet_meta *temp = res[0]; virg_vm_allocresult(v, vm_, res, res[0]); virg_tablet_unlock(v, temp->id); } vm_->timing1 /= 1000; vm_->timing2 /= 1000; vm_->timing3 /= 1000; // destruct timers cudaEventDestroy(start); cudaEventDestroy(data); cudaEventDestroy(kernel); cudaEventDestroy(results); } // if the streaming functionality is turned on else if(v->use_stream) { // copy virtual machine context to GPU constant memory cudaMemcpyToSymbol((char*)&vm, (char*)vm_, sizeof(virg_vm), 0, cudaMemcpyHostToDevice); VIRG_CUDCHK("const memcpy"); // we should always have an even number of tablets assert(VIRG_GPU_TABLETS % 2 == 0); unsigned stream_width = VIRG_GPU_TABLETS / 2; cudaStream_t stream[stream_width]; unsigned slot_ids[stream_width]; int slot_wait = 0; unsigned i; // construct streams for(i = 0; i < stream_width; i++) cudaStreamCreate(&stream[i]); VIRG_CUDCHK("stream create"); // create timers for each stream independently cudaEvent_t ev_create[stream_width], ev_start[stream_width], ev_data[stream_width], ev_kernel[stream_width], ev_results[stream_width]; for(i = 0; i < stream_width; i++) { cudaEventCreate(&ev_create[i]); cudaEventCreate(&ev_start[i]); cudaEventCreate(&ev_data[i]); cudaEventCreate(&ev_kernel[i]); cudaEventCreate(&ev_results[i]); } // start timer for each stream for(i = 0; i < stream_width; i++) cudaEventRecord(ev_create[i], stream[i]); // 
process tablets until finished for(i = 0; 1; i++) { // if every stream has been used, go back to use the first stream // again if(i >= stream_width) { i = 0; slot_wait = 1; } // if we are re-using streams then we need to block until they are // actually finished if(slot_wait) { VIRG_CUDCHK("before stream synchronize"); // block cudaStreamSynchronize(stream[i]); VIRG_CUDCHK("stream synchronize"); // unlock the tablets that the stream was using virg_tablet_unlock(v, slot_ids[i * 2]); virg_tablet_unlock(v, slot_ids[i * 2 + 1]); // record processing completion and output times cudaEventSynchronize(ev_results[i]); float f[4]; cudaEventElapsedTime(&f[0], ev_create[i], ev_start[i]); cudaEventElapsedTime(&f[1], ev_start[i], ev_data[i]); cudaEventElapsedTime(&f[2], ev_data[i], ev_kernel[i]); cudaEventElapsedTime(&f[3], ev_kernel[i], ev_results[i]); fprintf(stderr, "stream %u: %f %f %f %f\n", i, f[0], f[1], f[2], f[3]); } // if the data tablet doesn't have any rows then we're finished if(tab[0]->rows == 0) { proced++; slot_ids[i * 2] = tab[0]->id; slot_ids[i * 2 + 1] = res[0]->id; break; } #ifdef VIRG_DEBUG virg_tablet_check(tab[0]); #endif // round up blocks given rows to process and threads per block int blocks = (tab[0]->rows + v->threads_per_block - 1) / v->threads_per_block; assert(blocks < 65536); virg_tablet_meta *temp_tab, *temp_res; // if there are still tablets to process load, otherwise don't // note that we don't exit here because we need to wait for the // other streams to finish if(!tab[0]->last_tablet && !(num_tablets != 0 && proced + 1 > num_tablets)) { virg_db_load(v, tab[0]->next, &temp_tab); virg_vm_allocresult(v, vm_, &temp_res, res[0]); } // start timer for this stream cudaEventRecord(ev_start[i], stream[i]); // start tablet memcpy for this stream cudaMemcpyAsync((char*)v->gpu_slots + (i * 2) * VIRG_TABLET_SIZE, (char*)tab[0], tab[0]->size, cudaMemcpyHostToDevice, stream[i]); //virg_print_tablet_meta(tab[0]); VIRG_CUDCHK("tab memcpy"); // start tablet 
meta to constant memory memcpy for this stream cudaMemcpyToSymbolAsync((char*)&meta, (char*)tab[0], sizeof(virg_tablet_meta), i * 2 * sizeof(virg_tablet_meta), cudaMemcpyHostToDevice, stream[i]); VIRG_CUDCHK("tab meta"); // if we haven't put the result meta information in this stream's // constant memory area yet if(!slot_wait) { // copy result meta information for this stream cudaMemcpyToSymbolAsync((char*)&meta, (char*)res[0], sizeof(virg_tablet_meta), (i * 2 + 1) * sizeof(virg_tablet_meta), cudaMemcpyHostToDevice, stream[i]); VIRG_CUDCHK("res meta"); } // copy meta information to global memory as well so that the rows // variable can be updated during query execution cudaMemcpyAsync((char*)v->gpu_slots + (i * 2 + 1) * VIRG_TABLET_SIZE, (char*)res[0], sizeof(virg_tablet_meta), cudaMemcpyHostToDevice, stream[i]); VIRG_CUDCHK("res setup memcpy"); // record we're done with data transfer for this stream cudaEventRecord(ev_data[i], stream[i]); // launch the kernel for this stream void *tab_arg = (char*)v->gpu_slots + (i * 2) * VIRG_TABLET_SIZE; void *res_arg = (char*)v->gpu_slots + (i * 2 + 1) * VIRG_TABLET_SIZE; virginia_gpu<<<blocks, v->threads_per_block, 0, stream[i]>>> (i * 2, i * 2 + 1, tab_arg, res_arg, 0, 0, NULL); VIRG_CUDCHK("kernel"); // record that the kernel execution has finished cudaEventRecord(ev_kernel[i], stream[i]); // copy result tablet back for this stream cudaMemcpyAsync((char*)res[0], (char*)v->gpu_slots + (i * 2 + 1) * VIRG_TABLET_SIZE, VIRG_TABLET_SIZE, cudaMemcpyDeviceToHost, stream[i]); VIRG_CUDCHK("res memcpy"); // record that we're done with the result transfer for this stream cudaEventRecord(ev_results[i], stream[i]); // store the current data and result tablet pointers in the stream's // slot proced++; slot_ids[i * 2] = tab[0]->id; slot_ids[i * 2 + 1] = res[0]->id; // check if we've processed enough tablets if(tab[0]->last_tablet || (num_tablets != 0 && proced >= num_tablets)) break; tab[0] = temp_tab; res[0] = temp_res; } i++; unsigned 
j; if(!slot_wait) i = 0; // for each unfinished stream for(j = 0; j < VIRG_MIN(stream_width, proced); j++, i++) { if(i >= stream_width) i = 0; // wait for the stream to finish and print timing information cudaStreamSynchronize(stream[i]); cudaEventSynchronize(ev_results[i]); float f[4]; cudaEventElapsedTime(&f[0], ev_create[i], ev_start[i]); cudaEventElapsedTime(&f[1], ev_start[i], ev_data[i]); cudaEventElapsedTime(&f[2], ev_data[i], ev_kernel[i]); cudaEventElapsedTime(&f[3], ev_kernel[i], ev_results[i]); //fprintf(stderr, "stream %u: %f %f %f %f\n", i, f[0], f[1], f[2], f[3]); // leave last data and result tablets locked if(j < stream_width - 1 && j < proced - 1) { virg_tablet_unlock(v, slot_ids[i * 2]); virg_tablet_unlock(v, slot_ids[i * 2 + 1]); } } // destruct timers and streams for(i = 0; i < stream_width; i++) { cudaEventDestroy(ev_create[i]); cudaEventDestroy(ev_start[i]); cudaEventDestroy(ev_data[i]); cudaEventDestroy(ev_kernel[i]); cudaEventDestroy(ev_results[i]); cudaStreamDestroy(stream[i]); } } // memory mapped kernel execution else if(v->use_mmap) { assert(VIRG_GPU_TABLETS >= 2); #ifdef VIRG_NOPINNED VIRG_CHECK(1, "cannot use mapped execution without pinned memory"); #endif // copy virtual machine context into gpu constant memory cudaMemcpyToSymbol((char*)&vm, (char*)vm_, sizeof(virg_vm), 0, cudaMemcpyHostToDevice); // copy result tablet meta data into gpu constant memory // this needs to be done only once since the column sizes etc don't // change cudaMemcpyToSymbol((char*)&meta, (char*)res[0], sizeof(virg_tablet_meta), sizeof(virg_tablet_meta), cudaMemcpyHostToDevice); VIRG_CUDCHK("mapped const memcpy"); cudaMemcpyToSymbol((char*)&meta, (char*)tab[0], sizeof(virg_tablet_meta), 0, cudaMemcpyHostToDevice); VIRG_CUDCHK("mapped const 2 memcpy"); // construct timers cudaEvent_t start, data, kernel, results; cudaEventCreate(&start); cudaEventCreate(&data); cudaEventCreate(&kernel); cudaEventCreate(&results); float cum = 0; while(1) { // start timer 
cudaEventRecord(start, 0); //virg_print_tablet_meta(tab[0]); //fprintf(stderr, "::::%u\n", sizeof(virg_tablet_meta)); // copy tablet meta information to gpu constant memory VIRG_CUDCHK("before const 2 memcpy"); cudaMemcpyToSymbol((char*)&meta, (char*)tab[0], sizeof(virg_tablet_meta), 0, cudaMemcpyHostToDevice); VIRG_CUDCHK("const 2 memcpy"); // round number of thread blocks up given the number of rows to // process and the threads per block unsigned rows = tab[0]->rows; int blocks = (rows + v->threads_per_block - 1) / v->threads_per_block; assert(blocks < 65536); unsigned zero = 0; // copy 0 into the result row counter cudaMemcpyToSymbol((char*)&row_counter, (char*)&zero, sizeof(unsigned), 0, cudaMemcpyHostToDevice); // copy 0 into the result row buffer counter cudaMemcpyToSymbol((char*)&rowbuff_counter, (char*)&zero, sizeof(unsigned), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol((char*)&threadblock_order, (char*)&zero, sizeof(unsigned), 0, cudaMemcpyHostToDevice); cudaMemset((char*)v->gpu_slots + VIRG_TABLET_SIZE, 0, sizeof(unsigned) * blocks); VIRG_CUDCHK("row_counter set"); // record that we're done transferring data // since we're using mapped memory this is negligible since we just // need to set constant memory and 2 variables cudaEventRecord(data, 0); // get gpu pointers to the data and result tablets in main memory void *tab_arg; void *res_arg; cudaHostGetDevicePointer(&tab_arg, tab[0], 0); VIRG_CUDCHK("get tab device ptr"); cudaHostGetDevicePointer(&res_arg, res[0], 0); VIRG_CUDCHK("get res device ptr"); // launch kernel using mapped pointers virginia_gpu<<<blocks, v->threads_per_block>>> (0, 1, tab_arg, res_arg, 0, 0, v->gpu_slots); VIRG_CUDCHK("Single mapped kernel launch"); // record we're done with the kernel call cudaEventRecord(kernel, 0); // copy the number of tablet result rows from gpu memory cudaMemcpyFromSymbol((char*)&res[0]->rows, (char*)&row_counter, sizeof(unsigned), 0, cudaMemcpyDeviceToHost); // record that we're done transferring 
results information // this should also be negligible cudaEventRecord(results, 0); // get timing results float f[3]; cudaEventElapsedTime(&f[0], start, data); cudaEventElapsedTime(&f[1], data, kernel); cudaEventSynchronize(results); cudaEventElapsedTime(&f[2], kernel, results); // print timing information //fprintf(stderr, "block %u: %f %f %f %f\n", proced, cum, f[0], f[1], f[2]); // add to cumulative time cum += f[0] + f[1] + f[2]; proced++; // check if we're done processing tablets if(tab[0]->last_tablet || (num_tablets != 0 && proced >= num_tablets)) break; // load next data tablet virg_db_loadnext(v, tab); // if this data tablet has no rows, finish if(tab[0]->rows == 0) break; virg_tablet_meta *temp = res[0]; virg_vm_allocresult(v, vm_, res, res[0]); virg_tablet_unlock(v, temp->id); } // destruct timers cudaEventDestroy(start); cudaEventDestroy(data); cudaEventDestroy(kernel); cudaEventDestroy(results); } // wait for all cuda operations to finish cudaDeviceSynchronize(); return VIRG_SUCCESS; } /// shared memory used for reductions __shared__ int reduct[512]; /// shared memory location for the start of this threadblock's result area __shared__ unsigned bstart; /// shared memory variable for the number of rows output by this threadblock __shared__ unsigned block; /// where in the order this threadblock writes its results __shared__ unsigned shared_blockorder; /// how many rows have been written in the first result block __shared__ unsigned thisblockwritten; /// how many rows have been written in the second result block __shared__ unsigned nextblockwritten; #define OPARGS ( \ virg_op op, \ virg_vm_context &context, \ virg_tablet_meta *meta_tab, \ virg_tablet_meta *meta_res, \ void *tab, \ virg_tablet_meta *res, \ void *scratch, \ int &valid, \ unsigned &pc, \ unsigned &pc_wait) __device__ __forceinline__ void op_Column OPARGS { char *p = (char*)tab + meta_tab->fixed_block; unsigned row = blockIdx.x * blockDim.x + threadIdx.x; p += meta_tab->fixed_offset[op.p2] + 
		meta_tab->fixed_stride[op.p2] * row;
	// load this thread's row of column p2 into register p1; the stride switch
	// selects int (4), double (8) or char (1) width
	switch(meta_tab->fixed_stride[op.p2]) {
		case 4:
			context.reg[op.p1].i = *((int*)p);
			break;
		case 8:
			context.reg[op.p1].d = *((double*)p);
			break;
		case 1:
			context.reg[op.p1].c = *p;
			break;
	}
	context.type[op.p1] = meta_tab->fixed_type[op.p2];
	context.stride[op.p1] = meta_tab->fixed_stride[op.p2];
}

/// Load this thread's row key into register p1.
__device__ __forceinline__ void op_Rowid OPARGS
{
	unsigned row = blockIdx.x * blockDim.x + threadIdx.x;
	char *p = (char*)tab + meta_tab->key_block + meta_tab->key_stride * row;
	// NOTE(review): the address is computed with key_stride, but the width of
	// the load (and the recorded type/stride) comes from fixed_stride[op.p2] /
	// fixed_type[op.p2] — presumably op.p2 is set so these agree with the key
	// column; verify against the opcode generator.
	switch(meta_tab->fixed_stride[op.p2]) {
		case 4:
			context.reg[op.p1].i = *((int*)p);
			break;
		case 8:
			context.reg[op.p1].d = *((double*)p);
			break;
		case 1:
			context.reg[op.p1].c = *p;
			break;
	}
	context.type[op.p1] = meta_tab->fixed_type[op.p2];
	context.stride[op.p1] = meta_tab->fixed_stride[op.p2];
}

/// Load the integer immediate p2 into register p1.
__device__ __forceinline__ void op_Integer OPARGS
{
	context.reg[op.p1].i = op.p2;
	context.type[op.p1] = VIRG_INT;
	context.stride[op.p1] = sizeof(int);
}

/// Load the float immediate p4.f into register p1.
__device__ __forceinline__ void op_Float OPARGS
{
	context.reg[op.p1].f = op.p4.f;
	context.type[op.p1] = VIRG_FLOAT;
	context.stride[op.p1] = sizeof(float);
}

/// Mark this thread's row as filtered out (not part of the result set).
__device__ __forceinline__ void op_Invalid OPARGS
{
	valid = 0;
}

/**
 * A macro to compare identically typed register values and manipulate each
 * thread's program counter based on the result of this comparison. This is
 * implemented as a macro so that the comparison operator can be easily changed,
 * for example REGCMP(<=), used by the Le opcode.
*/
#define REGCMP(cmpop) \
	int x = 0; \
	switch(context.type[op.p1]) { \
		case VIRG_INT: \
			x = (context.reg[op.p1].i cmpop context.reg[op.p2].i); \
			break; \
		case VIRG_FLOAT: \
			x = (context.reg[op.p1].f cmpop context.reg[op.p2].f); \
			break; \
		case VIRG_INT64: \
			x = (context.reg[op.p1].li cmpop context.reg[op.p2].li); \
			break; \
		case VIRG_DOUBLE: \
			x = (context.reg[op.p1].d cmpop context.reg[op.p2].d); \
			break; \
		case VIRG_CHAR: \
			x = (context.reg[op.p1].c cmpop context.reg[op.p2].c); \
			break; \
	} \
	if(x) { \
		if(valid) \
			valid = op.p4.i; \
		pc_wait = op.p3 - pc - 1; \
	}

/* Comparison opcodes: compare registers op.p1 and op.p2 with the given
 * operator; when the comparison holds, update the row-valid flag from op.p4
 * and jump to instruction op.p3 by setting pc_wait. */
__device__ __forceinline__ void op_Neq OPARGS { REGCMP(!=) }
__device__ __forceinline__ void op_Gt  OPARGS { REGCMP(>) }
__device__ __forceinline__ void op_Ge  OPARGS { REGCMP(>=) }
__device__ __forceinline__ void op_Lt  OPARGS { REGCMP(<) }
__device__ __forceinline__ void op_Le  OPARGS { REGCMP(<=) }
__device__ __forceinline__ void op_And OPARGS { REGCMP(&&) }
__device__ __forceinline__ void op_Or  OPARGS { REGCMP(||) }

/* Equality opcode. Unlike REGCMP, floating-point registers are compared
 * within an epsilon (VIRG_FLOAT_ERROR) rather than bitwise. */
__device__ __forceinline__ void op_Eq OPARGS
{
	int x = 0;

	switch(context.type[op.p1]) {
		case VIRG_INT:
			x = (context.reg[op.p1].i == context.reg[op.p2].i);
			break;
		case VIRG_FLOAT: {
			// epsilon comparison for floats
			float f = context.reg[op.p1].f - context.reg[op.p2].f;
			x = (f <= VIRG_FLOAT_ERROR && f >= -VIRG_FLOAT_ERROR);
			break;
		}
		case VIRG_INT64:
			x = (context.reg[op.p1].li == context.reg[op.p2].li);
			break;
		case VIRG_DOUBLE: {
			// BUG FIX: this case previously subtracted the .f (float) union
			// members; use the .d (double) members and a double difference.
			double d = context.reg[op.p1].d - context.reg[op.p2].d;
			x = (d < VIRG_FLOAT_ERROR && d > -VIRG_FLOAT_ERROR);
			break;
		}
		case VIRG_CHAR:
			x = (context.reg[op.p1].c == context.reg[op.p2].c);
			break;
	}

	if(x) {
		if(valid)
			valid = op.p4.i;
		pc_wait = op.p3 - pc - 1;
	}
}

/* Logical-not opcode: jumps to op.p3 (updating the valid flag from op.p4)
 * when register op.p1 is falsy for its type. */
__device__ __forceinline__ void op_Not OPARGS
{
	int x = 0;

	switch(context.type[op.p1]) {
		case VIRG_INT:
			x = (context.reg[op.p1].i ? 1 : 0);
			break;
		case VIRG_FLOAT:
			x = (context.reg[op.p1].f ? 1 : 0);
			break;
		case VIRG_INT64:
			x = (context.reg[op.p1].li ? 1 : 0);
			break;
		case VIRG_DOUBLE:
			x = (context.reg[op.p1].d ? 1 : 0);
			break;
		case VIRG_CHAR:
			x = (context.reg[op.p1].c ? 1 : 0);
			break;
	}

	if(!x) {
		if(valid)
			valid = op.p4.i;
		pc_wait = op.p3 - pc - 1;
	}
}

/** A macro to perform a mathematical operation of the form
 * reg[p1] = reg[p2] operator reg[p3]. Like regcmp, this is used so that
 * multiple opcodes can use this code and easily change the math operator, as in
 * MATHOP(+). */
#define MATHOP(mop) \
	switch(context.type[op.p2]) { \
		case VIRG_INT: \
			context.reg[op.p1].i = \
				(context.reg[op.p2].i mop context.reg[op.p3].i); \
			break; \
		case VIRG_FLOAT: \
			context.reg[op.p1].f = \
				(context.reg[op.p2].f mop context.reg[op.p3].f); \
			break; \
		case VIRG_INT64: \
			context.reg[op.p1].li = \
				(context.reg[op.p2].li mop context.reg[op.p3].li); \
			break; \
		case VIRG_DOUBLE: \
			context.reg[op.p1].d = \
				(context.reg[op.p2].d mop context.reg[op.p3].d); \
			break; \
		case VIRG_CHAR: \
			context.reg[op.p1].c = \
				(context.reg[op.p2].c mop context.reg[op.p3].c); \
			break; \
	} \
	context.type[op.p1] = context.type[op.p2]; \
	context.stride[op.p1] = context.stride[op.p2];

/* Arithmetic opcodes: reg[p1] = reg[p2] <op> reg[p3], result typed after p2. */
__device__ __forceinline__ void op_Add OPARGS { MATHOP(+) }
__device__ __forceinline__ void op_Sub OPARGS { MATHOP(-) }
__device__ __forceinline__ void op_Mul OPARGS { MATHOP(*) }
__device__ __forceinline__ void op_Div OPARGS { MATHOP(/) }

/**
 * A convenience macro for castng a register from one type to another
 */
#define CASTREG(reg_, destkey, t, srckey) \
	context.reg[reg_].destkey = (t) context.reg[reg_].srckey;

/* Cast opcode: converts register op.p2 in place to the type given by op.p1,
 * updating both the register's type tag and its stride. */
__device__ __forceinline__ void op_Cast OPARGS
{
	switch(op.p1) {
		case VIRG_INT:
			switch(context.type[op.p2]) {
				case VIRG_FLOAT:
					CASTREG(op.p2, i, int, f);
					break;
				case VIRG_INT64:
					CASTREG(op.p2, i, int, li);
					break;
				case VIRG_DOUBLE:
					CASTREG(op.p2, i, int, d);
					break;
				case VIRG_CHAR:
					CASTREG(op.p2, i, int, c);
					break;
			}
			context.stride[op.p2] = sizeof(int);
			break;
		case VIRG_FLOAT:
			switch(context.type[op.p2]) {
				case VIRG_INT:
					CASTREG(op.p2, f, float, i);
					break;
				case VIRG_INT64:
					CASTREG(op.p2, f, float, li);
					break;
				case VIRG_DOUBLE:
					CASTREG(op.p2, f, float, d);
					break;
				case VIRG_CHAR:
					CASTREG(op.p2, f, float, c);
					break;
			}
			context.stride[op.p2] = sizeof(float);
			break;
		case VIRG_INT64:
			switch(context.type[op.p2]) {
				case VIRG_INT:
					CASTREG(op.p2, li, long long int, i);
					break;
				case VIRG_FLOAT:
					CASTREG(op.p2, li, long long int, f);
					break;
				case VIRG_DOUBLE:
					CASTREG(op.p2, li, long long int, d);
					break;
				case VIRG_CHAR:
					CASTREG(op.p2, li, long long int, c);
					break;
			}
			// BUG FIX: the stride must reflect the new 8-byte type; it was
			// sizeof(int), which made op_Result write only 4 bytes.
			context.stride[op.p2] = sizeof(long long int);
			break;
		case VIRG_DOUBLE:
			switch(context.type[op.p2]) {
				case VIRG_INT:
					CASTREG(op.p2, d, double, i);
					break;
				case VIRG_FLOAT:
					CASTREG(op.p2, d, double, f);
					break;
				case VIRG_INT64:
					CASTREG(op.p2, d, double, li);
					break;
				case VIRG_CHAR:
					CASTREG(op.p2, d, double, c);
					break;
			}
			// BUG FIX: was sizeof(int); op_Result selects the double write
			// path by stride == 8.
			context.stride[op.p2] = sizeof(double);
			break;
		case VIRG_CHAR:
			switch(context.type[op.p2]) {
				case VIRG_INT:
					CASTREG(op.p2, c, char, i);
					break;
				case VIRG_FLOAT:
					CASTREG(op.p2, c, char, f);
					break;
				case VIRG_INT64:
					CASTREG(op.p2, c, char, li);
					break;
				// BUG FIX: this case label was a duplicated VIRG_CHAR (which
				// cast the register from its own .d member); the source key
				// 'd' shows VIRG_DOUBLE was intended.
				case VIRG_DOUBLE:
					CASTREG(op.p2, c, char, d);
					break;
			}
			// BUG FIX: was sizeof(int); op_Result selects the char write path
			// by stride == 1.
			context.stride[op.p2] = sizeof(char);
			break;
	}

	context.type[op.p2] = (virg_t)op.p1;
}

__device__ __forceinline__ void op_Result OPARGS
{
	/**
	 * To manage outputting result rows, every thread atomically increments a
	 * shared variable to determine how many result rows there will be, then the
	 * first thread in the block atomically adds this number to a global
	 * variable, with appropriate threadblock synchronization in between these
	 * calls. This is more efficient than a scan operation in this case, since
	 * we don't care about the order of the rows and shared memory atomic
	 * operations are actually fairly efficient when compared to reduction
	 * operations, which tend to cause shared memory bank conflicts.
	 */
	unsigned place;
	__syncthreads();

	// if this is a valid row, update the shared variable
	if(valid)
		place = atomicAdd(&block, 1);
	__syncthreads();

	unsigned num_rows = block;

	// the first thread in the block updates the global variable
	if(threadIdx.x == 0) {
		if(scratch != NULL)
			// mapped
			bstart = atomicAdd(&row_counter, block);
		else {
			bstart = atomicAdd(&res->rows, block);
		}
	}
	__syncthreads();

	// TODO check for result tablet overflow
	unsigned block_start = bstart;
	//block = 0;

	unsigned j;
	char *p;

	// if this is a result row
	if(valid)
		// for every register to be output for this result row
		for(j = op.p1; j < op.p1 + op.p2; j++) {
			// register/column stride
			unsigned stride = context.stride[j];
			unsigned col_location = stride * (block_start + place);

			// if not mapped, write to the result tablet
			if(scratch == NULL || VIRG_NOTWOSTEP)
				p = (char*)res + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] + col_location;
			// if mapped, write to the scratch memory area for buffering before
			// sending across the PCI bus
			else
				p = (char*)scratch + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] + col_location;

			//printf("write row %u\n", place);

			// switch the write based on the variable stride
			switch(stride) {
				case 4:
					((int*)p)[0] = context.reg[j].i;
					break;
				case 8:
					((double*)p)[0] = context.reg[j].d;
					break;
				case 1:
					p[0] = context.reg[j].c;
					break;
			}
		}

	/**
	 * If mapped memory is being used, result rows are written back to main
	 * memory in a two step process. This both reduces the number of accesses
	 * that have to cross the PCI bus, and ensures that the results are being
	 * coalesced properly, as the coalescing rules for mapped memory appear to
	 * be slightly more strict than for GPU global memory. First, result rows
	 * are written to GPU global memory, exactly how they would be if serial
	 * execution and memory transfers were being used. The results are divided
	 * into threadblock-sized blocks, and once we are sure that the results are
	 * written to global memory, we increment the global counters for the blocks
	 * that we have written to. After incrementing, we check to see if the
	 * blocks we have written to are completely filled with rows. If so, every
	 * thread in the threadblock transfers a row from the result block to main
	 * memory, thus maximizing efficiency and coalescing. Tests show that this
	 * is a very good way of managing result transfers of this kind, since
	 * transfers back to main memory proceed efficiently but are overlapped with
	 * kernel execution.
	 */
	// if we are using mapped memory
	if(scratch != NULL && !VIRG_NOTWOSTEP) {
		// make sure our result writes have reached global memory
		__threadfence();
		__syncthreads();

		// only do this in the first cuda thread
		if(threadIdx.x == 0) {
			shared_blockorder = atomicAdd(&threadblock_order, 1);

			if(num_rows > 0) {
				unsigned *threadswritten =
					(unsigned*)((char*)scratch + VIRG_TABLET_SIZE);
				unsigned result_blockid =
					block_start / VIRG_THREADSPERBLOCK;//blockDim.x;
				unsigned blockbreak =
					((block_start + num_rows - 1) & VIRG_THREADSPERBLOCK_MASK);
				unsigned x, y;

				// if the results span over two different thread block areas
				if((blockbreak > (block_start & VIRG_THREADSPERBLOCK_MASK)) &&
					(block_start + num_rows != blockbreak)) {
					// increment the rows that have been written for both
					unsigned thisnewrows = blockbreak - block_start;
					x = atomicAdd(&threadswritten[result_blockid], thisnewrows);
					thisblockwritten = x + thisnewrows;

					unsigned nextnewrows = block_start + num_rows - blockbreak;
					y = atomicAdd(&threadswritten[result_blockid + 1], nextnewrows);
					nextblockwritten = y + nextnewrows;
				}
				// otherwise increment the rows that have been written for only
				// this result threadblock area
				else {
					x = atomicAdd(&threadswritten[result_blockid], num_rows);
					thisblockwritten = x + num_rows;
					nextblockwritten = 0;
				}
			}
		}
		__syncthreads();

		// if an entire threadblock-sized area has been filled with result rows
		if(num_rows > 0 && thisblockwritten == VIRG_THREADSPERBLOCK) {
			unsigned aligned_start = block_start & VIRG_THREADSPERBLOCK_MASK;

			// do coalesced writes from global memory to mapped main memory of
			// this block of results
			for(j = op.p1; j < op.p1 + op.p2; j++) {
				unsigned stride = context.stride[j];
				p = (char*)res + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);
				char *p_src = (char*)scratch + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);

				switch(stride) {
					case 4:
						((int*)p)[0] = ((int*)p_src)[0];
						break;
					case 8:
						// BUG FIX: stride-8 columns were copied with a 4-byte
						// int store, truncating each element; copy 8 bytes.
						((double*)p)[0] = ((double*)p_src)[0];
						break;
					case 1:
						// NOTE(review): this copies 4 bytes per thread but the
						// byte offset advances by 1 per thread, so adjacent
						// threads overlap -- looks suspect; confirm intent.
						if(threadIdx.x < VIRG_THREADSPERBLOCK / 4)
							((int*)p)[0] = ((int*)p_src)[0];
						break;
				}
			}
		}

		// the last threadblock to execute flushes the (possibly partial)
		// trailing result block
		if(shared_blockorder == gridDim.x - 1) {
			unsigned aligned_start = row_counter & VIRG_THREADSPERBLOCK_MASK;

			// do coalesced writes from global memory to mapped main memory of
			// this block of results
			for(j = op.p1; j < op.p1 + op.p2; j++) {
				unsigned stride = context.stride[j];
				p = (char*)res + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);
				char *p_src = (char*)scratch + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);

				switch(stride) {
					case 4:
						((int*)p)[0] = ((int*)p_src)[0];
						break;
					case 8:
						// BUG FIX: copy full 8-byte elements (was an int copy)
						((double*)p)[0] = ((double*)p_src)[0];
						break;
					case 1:
						if(threadIdx.x < VIRG_THREADSPERBLOCK / 4)
							((int*)p)[0] = ((int*)p_src)[0];
						break;
				}
			}
		}

		// if a second thread-block sized area has been filled with result rows
		if(num_rows > 0 && nextblockwritten == VIRG_THREADSPERBLOCK) {
			unsigned aligned_start =
				(block_start + num_rows) & VIRG_THREADSPERBLOCK_MASK;

			// do coalesced writes from global memory to mapped main memory of
			// this block of results
			for(j = op.p1; j < op.p1 + op.p2; j++) {
				unsigned stride = context.stride[j];
				p = (char*)res + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);
				char *p_src = (char*)scratch + meta_res->fixed_block +
					meta_res->fixed_offset[j - op.p1] +
					stride * (aligned_start + threadIdx.x);

				switch(stride) {
					case 4:
						((int*)p)[0] = ((int*)p_src)[0];
						break;
					case 8:
						// BUG FIX: copy full 8-byte elements (was an int copy)
						((double*)p)[0] = ((double*)p_src)[0];
						break;
					case 1:
						if(threadIdx.x < VIRG_THREADSPERBLOCK / 4)
							((int*)p)[0] = ((int*)p_src)[0];
						break;
				}
			}
		}
	} // if we are using mapped memory
}

/**
 * @ingroup vm
 * @brief CUDA virtual machine kernel
 *
 * This function executes a virtual machine context on a data tablet in
 * parallel. Opcodes are are accessed using a switch statement, since there is
 * no support for indirect jumping on current NVIDIA hardware.
 *
 * @param tab_slot	The GPU constant memory slot containing the data
 * tablet's meta information
 * @param res_slot	The GPU constant memory slot containing the result
 * tablet's meta information
 * @param tab_		Pointer to the data tablet
 * @param res_		Pointer to the result tablet
 * @param start_row	The row at which to start processing the data tablet
 * @param num_rows	The number of rows to process from the data tablet, 0
 * for as many as possible
 * @param scratch	Buffer tablet used to store intermediate results in
 * global memory before moving them to mapped main memory, only set for mapped
 * memory execution
 * @return VIRG_SUCCESS or VIRG_FAIL depending on errors during the function
 * call
 */
__global__ void virginia_gpu(unsigned tab_slot, unsigned res_slot,
	void* tab_, void* res_, unsigned start_row, unsigned num_rows,
	void *scratch)
{
	// tablet pointers
	virg_tablet_meta *res = (virg_tablet_meta*)res_;
	virg_tablet_meta *meta_tab = &meta[tab_slot];
	virg_tablet_meta *meta_res = &meta[res_slot];

	// misc kernel variables
	unsigned pc = vm.pc;
	unsigned pc_wait = 0;
	int valid = 1;
	unsigned row = blockIdx.x * blockDim.x + threadIdx.x;

	if(threadIdx.x == 0)
		block = 0;

	virg_vm_context context;

	// if we've reached the end of the data tablet or the number of rows we're
	// supposed to process in this kernel launch then this row is not valid,
	// otherwise go to the row calculated with the thread id and block id
	if(row >= meta_tab->rows || (row >= num_rows && num_rows != 0))
		valid = 0;
	else
		row += start_row;

	// int op = vm.stmt[pc].op;
	// __asm(".global .u32 jmptbl[2] = {op_Column, op_Integer};");
	// __asm("bra %%op, jmptbl;");

	while(1) {
		// if this thread has diverged and is waiting at a later opcode, then
		// don't switch on the current opcode
		if(pc_wait > 0)
			pc_wait--;
		// otherwise switch on the current global opcode
		else {
#define ARG (vm.stmt[pc], context, meta_tab, meta_res, tab_, res, scratch, valid, pc, pc_wait)
			switch(vm.stmt[pc].op) {
				case OP_Column  : op_Column  ARG; break;
				case OP_Rowid   : op_Rowid   ARG; break;
				case OP_Result  : op_Result  ARG; break;
				case OP_Invalid : op_Invalid ARG; break;
				case OP_Integer : op_Integer ARG; break;
				case OP_Float   : op_Float   ARG; break;
				case OP_Converge: return;
				case OP_Le      : op_Le      ARG; break;
				case OP_Lt      : op_Lt      ARG; break;
				case OP_Ge      : op_Ge      ARG; break;
				case OP_Gt      : op_Gt      ARG; break;
				case OP_Eq      : op_Eq      ARG; break;
				case OP_Neq     : op_Neq     ARG; break;
				case OP_Add     : op_Add     ARG; break;
				case OP_Sub     : op_Sub     ARG; break;
				case OP_Mul     : op_Mul     ARG; break;
				case OP_Div     : op_Div     ARG; break;
				case OP_And     : op_And     ARG; break;
				case OP_Or      : op_Or      ARG; break;
				case OP_Not     : op_Not     ARG; break;
				case OP_Cast    : op_Cast    ARG; break;
			}
		}

		pc++;
	}
}
the_stack
#include <algorithm>

namespace amgx
{

// Pseudo-random number generator: integer mixing hash of 'a' xor'd with
// 'seed'; used to impose a pseudo-random total order on row indices.
static __host__ __device__ unsigned int hash_function(unsigned int a, unsigned int seed)
{
    a ^= seed;
    a = (a + 0x7ed55d16) + (a << 12);
    a = (a ^ 0xc761c23c) + (a >> 19);
    a = (a + 0x165667b1) + (a << 5);
    a = (a ^ 0xd3a2646c) + (a << 9);
    a = (a + 0xfd7046c5) + (a << 3);
    a = (a ^ 0xb55a4f09) + (a >> 16);
    return a;
}

// Predicate functor: true for rows that are still uncolored (color == 0).
struct is_zero
{
    __host__ __device__ bool operator()(int x)
    {
        return x == 0;
    }
};

// ---------------------------
// Kernels
// ---------------------------

// One warp per row: each lane scans part of the row, marks neighbor colors in
// a 64-bit bitmask, and lane 0 assigns the smallest unused color if the row is
// a local maximum under the hash ordering.
template< int CTA_SIZE>
__global__
void color_kernel_warp( const int A_num_rows, const int *A_rows, const int *A_cols, int num_colors, int *new_color, int *A_colors )
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    // Thread coordinates.
    const int warp_id = utils::warp_id();
    const int lane_id = utils::lane_id();
    volatile __shared__ unsigned long long colors_used[NUM_WARPS_PER_CTA][32];
    // Row identifier.
    int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;

    // Iterate over the rows of the matrix.
    for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
    {
        colors_used[warp_id][lane_id] = 0ull;
        int row_color = A_colors[row_id];

        if ( row_color != 0 ) // Already colored!!!
        {
            continue;
        }

        int row_hash = hash_function(row_id, 0);
        bool max_row = true;
        // Iterators over my row.
        int row_begin = A_rows[row_id    ];
        int row_end   = A_rows[row_id + 1];

        for ( ; row_begin < row_end ; row_begin += 32)
        {
            // Iterator.
            int row_it = row_begin + lane_id;
            // Get the column index (if the iterator is valid).
            int col_id = -1;

            if ( row_it < row_end )
            {
                col_id = A_cols[row_it];
            }

            // Each thread hashes its column id.
            int col_hash = hash_function( col_id, 0 );
            // Get the color of the column.
            int col_color = -1;

            if ( row_it < row_end && col_id < A_num_rows )
            {
                col_color = A_colors[col_id];
            }

            if (col_color != -1)
            {
                // Set the bit corresponding to that color to 1.
                // BUG FIX: use a 64-bit shift; "1 << col_color" was a 32-bit
                // shift, which is wrong (and undefined) for colors >= 31 even
                // though the mask array is unsigned long long.
                colors_used[warp_id][lane_id] |= (1ull << col_color);
            }

            max_row &= (row_hash > col_hash || col_color != 0 || col_color == -1);
        }

        max_row &= utils::all( max_row );

        if (max_row) // If I'm a max
        {
            unsigned int active_mask = utils::activemask();

            // warp reduction to figure out what color to assign
            if (lane_id < 16) { colors_used[warp_id][lane_id] |= colors_used[warp_id][lane_id + 16]; }

            utils::syncwarp(active_mask);

            if (lane_id < 8) { colors_used[warp_id][lane_id] |= colors_used[warp_id][lane_id + 8]; }

            utils::syncwarp(active_mask);

            if (lane_id < 4) { colors_used[warp_id][lane_id] |= colors_used[warp_id][lane_id + 4]; }

            utils::syncwarp(active_mask);

            if (lane_id < 2) { colors_used[warp_id][lane_id] |= colors_used[warp_id][lane_id + 2]; }

            utils::syncwarp(active_mask);

            if (lane_id < 1) { colors_used[warp_id][lane_id] |= colors_used[warp_id][lane_id + 1]; }

            utils::syncwarp(active_mask);

            if (lane_id == 0)
            {
                // Find the first unassigned color, starting at 1
#pragma unroll
                for (int i = 1; i < 64; i++)
                {
                    // BUG FIX: "1 << i" overflows a 32-bit int for i >= 31;
                    // the loop runs i up to 63, so a 64-bit shift is required.
                    unsigned long long mask = 1ull << i;

                    if ( !(colors_used[warp_id][0] & mask) )
                    {
                        A_colors[row_id] = i;

                        if (i >= num_colors)
                        {
                            new_color[0] = 1;
                        }

                        break;
                    }
                }
            }
        }
    }
}

// One thread per row: gather neighbor colors in a 64-bit mask (bit 64-c for
// color c), color the row if it is the hash-max among uncolored neighbors,
// and block-reduce the number of still-uncolored rows into
// d_num_uncolored_block.
template< int CTA_SIZE>
__global__
void color_kernel_thread( const int A_num_rows, const int *A_rows, const int *A_cols, int *d_num_colors, int *d_new_color, int *A_colors, int *d_num_uncolored_block, int *d_done)
{
    const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
    // Row identifier.
    int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;

    if (d_done[0] == 1) { return; }

    int num_uncolored_thread = 0;

    // Iterate over rows.
    for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID )
    {
        unsigned long long used_colors = 0ull;
        int row_color = A_colors[row_id];

        if ( row_color != 0 ) // Already colored!!!
        {
            continue;
        }
        else
        {
            num_uncolored_thread++;
        }

        int row_hash = hash_function(row_id, 0);
        //int row_hash = row_id;
        bool max_row = true;
        // Iterators over my row.
        int row_begin = A_rows[row_id    ];
        int row_end   = A_rows[row_id + 1];

        for ( int row_it = row_begin; row_it < row_end ; row_it++)
        {
            // Get the column index (if the iterator is valid).
            int col_id = A_cols[row_it];

            if (col_id >= A_num_rows || col_id == row_id) { continue; }

            // Each thread hashes its column id.
            int col_hash = hash_function( col_id, 0 );
            // Get the color of the column.
            int col_color = A_colors[col_id];

            // Update the color.
            if ( col_color > 0) { used_colors |= 1ull << (64 - col_color); }

            // Is it still a max row?
            max_row &= (row_hash > col_hash || col_color != 0);
        }

        if ( max_row )
        {
            int my_color = 64 - utils::bfind( ~used_colors );

            if ( my_color > 0 ) { A_colors[row_id] = my_color; }

            if ( my_color >= d_num_colors[0] ) { d_new_color[0] = 1; }
        }
    }

    // Do per block reduction
    __shared__ volatile int smem[CTA_SIZE];
    smem[threadIdx.x] = num_uncolored_thread;
    __syncthreads();

    for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
    {
        if ( threadIdx.x < off ) { smem[threadIdx.x] += smem[threadIdx.x + off]; }

        __syncthreads();
    }

    // warp reduce
    if ( threadIdx.x < 32 )
    {
        smem[threadIdx.x] += smem[threadIdx.x + 16];
        smem[threadIdx.x] += smem[threadIdx.x + 8];
        smem[threadIdx.x] += smem[threadIdx.x + 4];
        smem[threadIdx.x] += smem[threadIdx.x + 2];
        smem[threadIdx.x] += smem[threadIdx.x + 1];
    }

    if ( threadIdx.x == 0 ) { d_num_uncolored_block[blockIdx.x] = smem[0]; }
}

// Single-block kernel: reduces the per-block uncolored counts, bumps the
// color count when a new color appeared, and decides termination (writing
// both the device flag and the host-mapped flag h_done).
template<int MAX_CTA_SIZE, bool TERMINATE_IF_FULL_COLORS_USED>
__global__
void count_uncolored_and_num_colors_kernel(int *d_new_color, int *d_num_colors, int max_uncolored_rows, int *d_num_uncolored_block, int *d_num_uncolored, int *d_done, int *h_done, int *d_start_color, int CTA_SIZE)
{
    // Do block reduction
    __shared__ volatile int smem[MAX_CTA_SIZE];

    if (threadIdx.x >= CTA_SIZE)
    {
        smem[threadIdx.x] = 0;
    }
    else
    {
        smem[threadIdx.x] = d_num_uncolored_block[threadIdx.x];
    }

    __syncthreads();

    for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
    {
        if ( threadIdx.x < off ) { smem[threadIdx.x] += smem[threadIdx.x + off]; }

        __syncthreads();
    }

    // warp reduce
    if ( threadIdx.x < 32 )
    {
        smem[threadIdx.x] += smem[threadIdx.x + 16];
        smem[threadIdx.x] += smem[threadIdx.x + 8];
        smem[threadIdx.x] += smem[threadIdx.x + 4];
        smem[threadIdx.x] += smem[threadIdx.x + 2];
        smem[threadIdx.x] += smem[threadIdx.x + 1];
    }

    if ( threadIdx.x == 0 )
    {
        int prev_num_uncolored = d_num_uncolored[0];
        int num_uncolored = smem[0];
        int num_colors = d_num_colors[0];

        if (d_new_color[0])
        {
            num_colors++;        // New color has been added
            d_new_color[0] = 0;  // Reset to 0
        }

        // Check for termination
        if (TERMINATE_IF_FULL_COLORS_USED)
        {
            // Check for termination
            if (num_colors == 64 || prev_num_uncolored == num_uncolored || num_uncolored <= max_uncolored_rows)
            {
                h_done[0] = 1;
                d_done[0] = 1;
            }
        }
        else
        {
            if (prev_num_uncolored == num_uncolored || num_uncolored <= max_uncolored_rows)
            {
                if (num_uncolored <= max_uncolored_rows)
                {
                    h_done[0] = 1;
                    d_done[0] = 1;
                }
                else
                {
                    d_start_color[0] += 32;
                }
            }
        }

        // Write to gmem
        d_num_uncolored[0] = num_uncolored;
        d_num_colors[0] = num_colors;
    }
}

//////////////////////////////
// ANYRING version: begin
// This unrolled recursion is used to visit neighbors.
//////////////////////////////

template<int COLORING_LEVEL>
struct parallel_greedy_neighborhood
{
    // Recursively visits the COLORING_LEVEL-ring neighborhood of row_id,
    // accumulating used colors and the hash-max predicate.
    __device__ __inline__ static void visit(int row_id, int row_hash, int visit_id,
            const int A_num_rows,
            const int *A_rows,
            const int *A_cols,
            int *A_colors,
            const int &start_color,
            bool &max_row,
            unsigned long long &used_colors )
    {
        int row_begin = A_rows[visit_id    ];
        int row_end   = A_rows[visit_id + 1];

        for ( int row_it = row_begin; row_it < row_end ; row_it++)
        {
            // Get the column index (if the iterator is valid).
            int col_id = A_cols[row_it];

            if (col_id >= A_num_rows || col_id == visit_id) { continue; }

            // Each thread hashes its column id.
            int col_hash = hash_function( col_id, 0 );
            // Get the color of the column.
            int col_color = A_colors[col_id];

            // Update the color.
            if ( col_color - start_color > 0) { used_colors |= 1ull << (64 - col_color + start_color); }

            // Is it still a max row?
            max_row &= (row_hash > col_hash || (row_hash == col_hash && col_id >= row_id) || col_color != 0);

            if (!max_row) { break; }

            parallel_greedy_neighborhood < COLORING_LEVEL - 1 >::visit(row_id, row_hash, col_id, A_num_rows, A_rows, A_cols, A_colors, start_color, max_row, used_colors);

            if (!max_row) { break; }
        }
    }
};

// Recursion terminator: depth 0 visits nothing.
template<>
struct parallel_greedy_neighborhood<0>
{
    __device__ __inline__ static void visit(int row_id, int row_hash, int visit_id,
            const int A_num_rows,
            const int *A_rows,
            const int *A_cols,
            int *A_colors,
            const int &start_color,
            bool &max_row,
            unsigned long long &used_colors )
    {
        ;
    }
};

//////////////////////////////
// This kernel is equivalent to color_kernel_thread, but if COLORING_LEVEL>=2 it recurses to neighbors'+ neighbors.
// it also uses start_color_ to continue coloring with parallel greedy instead of falling back to min_max if more than 64 colors are used
//////////////////////////////
template< int CTA_SIZE, int COLORING_LEVEL >
__global__
void color_kernel_thread_anyring( const int A_num_rows, const int *A_rows, const int *A_cols, int *d_num_colors, int *d_new_color, int *A_colors, int *d_num_uncolored_block, int *d_done, const int *start_color_)
{
    const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
    // Row identifier.
    int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;

    if (d_done[0] == 1) { return; }

    const int start_color = start_color_[0];// (d_num_colors[0] / 64)*64;
    int num_uncolored_thread = 0;

    // Iterate over rows.
    for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID )
    {
        unsigned long long used_colors = 0ull;
        int row_color = A_colors[row_id];

        if ( row_color != 0 ) // Already colored!!!
        {
            continue;
        }
        else
        {
            num_uncolored_thread++;
        }

        int row_hash = hash_function(row_id, 0);
        //int row_hash = row_id;
        bool max_row = true;

        if (COLORING_LEVEL == 1)
        {
            // This path is equivalent to color_kernel_thread, but it uses start_color.
            // Iterators over my row.
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];

            for ( int row_it = row_begin; row_it < row_end ; row_it++)
            {
                // Get the column index (if the iterator is valid).
                int col_id = A_cols[row_it];

                if (col_id >= A_num_rows || col_id == row_id) { continue; }

                // Each thread hashes its column id.
                int col_hash = hash_function( col_id, 0 );
                // Get the color of the column.
                int col_color = A_colors[col_id];

                // Update the color.
                if ( col_color - start_color > 0) { used_colors |= 1ull << (64 - col_color + start_color); }

                // Is it still a max row?
                max_row &= (row_hash > col_hash || (row_hash == col_hash && col_id >= row_id) || col_color != 0);

                if (!max_row) { break; }
            }
        }
        else
        {
            //recurse
            parallel_greedy_neighborhood<COLORING_LEVEL>::visit(row_id, row_hash, row_id, A_num_rows, A_rows, A_cols, A_colors, start_color, max_row, used_colors);
        }

        if ( max_row )
        {
            int my_color = 64 - utils::bfind( ~used_colors );

            if ( my_color > 0 && my_color < 64)
            {
                A_colors[row_id] = my_color + start_color;

                if ( my_color + start_color >= d_num_colors[0] ) { d_new_color[0] = 1; }
            }
        }
    }

    // Do per block reduction
    __shared__ volatile int smem[CTA_SIZE];
    smem[threadIdx.x] = num_uncolored_thread;
    __syncthreads();

    for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
    {
        if ( threadIdx.x < off ) { smem[threadIdx.x] += smem[threadIdx.x + off]; }

        __syncthreads();
    }

    // warp reduce
    if ( threadIdx.x < 32 )
    {
        smem[threadIdx.x] += smem[threadIdx.x + 16];
        smem[threadIdx.x] += smem[threadIdx.x + 8];
        smem[threadIdx.x] += smem[threadIdx.x + 4];
        smem[threadIdx.x] += smem[threadIdx.x + 2];
        smem[threadIdx.x] += smem[threadIdx.x + 1];
    }

    if ( threadIdx.x == 0 ) { d_num_uncolored_block[blockIdx.x] = smem[0]; }
}

//////////////////////////////
//
// ANYRING version: end
//////////////////////////////

// Fallback coloring kernel used once 64 colors are exhausted: instead of
// picking the smallest unused color, a hash-max row takes
// (max neighbor color + 1), so the color count can grow past 64.
template< int CTA_SIZE>
__global__
void color_kernel_thread_fallback( const int A_num_rows, const int *A_rows, const int *A_cols, int num_colors, int *new_color, int *A_colors )
{
    const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
    // Row identifier.
    int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;

    // Iterate over rows.
    for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID )
    {
        int row_color = A_colors[row_id];

        if ( row_color != 0 ) // Already colored!!!
        {
            continue;
        }

        int row_hash = hash_function(row_id, 0);
        bool max_row = true;
        // Iterators over my row.
        int row_begin = A_rows[row_id    ];
        int row_end   = A_rows[row_id + 1];
        int max_color = 0;

        //if (row_id == 2) printf("row_id=%d\n",row_id);
        for ( int row_it = row_begin; row_it < row_end ; row_it++)
        {
            // Get the column index (if the iterator is valid).
            int col_id = A_cols[row_it];

            if (col_id >= A_num_rows) { continue; }

            // Each thread hashes its column id.
            int col_hash = hash_function( col_id, 0 );
            // Get the color of the column.
            int col_color = A_colors[col_id];

            // Update the color.
            if ( col_color > max_color) { max_color = col_color; }

            // Is it still a max row?
            max_row &= (row_hash > col_hash || col_color != 0);
        }

        if ( max_row )
        {
            // Take one more than the largest neighbor color.
            int my_color = max_color + 1;

            if ( my_color > 0 ) { A_colors[row_id] = my_color; }

            if ( my_color >= num_colors ) { new_color[0] = 1; }
        }
    }
}

// Debug kernel (one warp per row): flags any edge whose endpoints share a
// color by setting error_found[0]; prints the first offender when
// A_gtlt_count is non-NULL.
template< int CTA_SIZE, int WARP_SIZE >
__global__
void dbg_check_coloring_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_colors, const int *A_gtlt_count, int *error_found )
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    // Thread coordinates.
    const int warp_id = utils::warp_id();
    const int lane_id = utils::lane_id();
    // Row identifier.
    int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;

    // Iterate over the rows of the matrix.
    for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID )
    {
        int row_color = A_colors[row_id];
        // Iterators over my row.
        int row_begin = A_rows[row_id    ];
        int row_end   = A_rows[row_id + 1];

        for ( ; row_begin < row_end ; row_begin += WARP_SIZE )
        {
            // Iterator.
            int row_it = row_begin + lane_id;
            // Get the column index (if the iterator is valid).
            int col_id = -1;

            if ( row_it < row_end )
            {
                col_id = A_cols[row_it];
            }

            // Get the color of the column.
            int col_color = -1;

            if ( row_it < row_end && col_id < A_num_rows )
            {
                col_color = A_colors[col_id];
            }

            // Is there something wrong ??
            if ( row_id != col_id && row_color == col_color )
            {
                if ( A_gtlt_count != NULL && !error_found[0] )
                {
                    printf( "row_id=%d, row_color=%d, col_id=%d, col_color=%d\n", row_id, row_color, col_id, col_color );
                }

                error_found[0] = 1;
            }
        }
    }
}

// ---------------------------
// Methods
// ---------------------------

// Base-class constructor: validates the configured coloring level and reads
// the allowed fraction of rows that may stay uncolored (forced to 0 when
// determinism is requested).
template< class T_Config >
Parallel_Greedy_Matrix_Coloring_Base<T_Config>::Parallel_Greedy_Matrix_Coloring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
    if ( this->m_coloring_level > 5 )
    {
        FatalError( "Not implemented for coloring_level > 5", AMGX_ERR_NOT_SUPPORTED_TARGET );
    }

    if ( cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default"))
    {
        m_uncolored_fraction = 0.0;
    }
    else
    {
        m_uncolored_fraction = cfg.AMG_Config::getParameter<double>("max_uncolored_percentage", cfg_scope);
    }
}

// Block version
// Device-side driver: repeatedly launches the coloring kernel for the
// configured ring level plus a single-block reduction kernel that decides
// termination through a mapped host flag (h_done), then falls back to the
// max-neighbor-color kernel if 64 colors were exhausted.
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void Parallel_Greedy_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A )
{
    ViewType oldView = A.currentView();
    this->m_row_colors.resize( A.row_offsets.size() - 1 );

    if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
    else { A.setViewExterior(); }

    const int num_rows = A.get_num_rows();
    // Rows we are allowed to leave uncolored before stopping early.
    const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows );
    const int CTA_SIZE = 128;
    const int MAX_GRID_SIZE = 1024;

    // NOTE(review): this guard can never fire since MAX_GRID_SIZE is the
    // constant 1024 -- presumably kept as a tripwire for future edits.
    if (MAX_GRID_SIZE > 1024)
    {
        FatalError("Grid size must be less than 1024\n", AMGX_ERR_CORE);
    }

    const int GRID_SIZE = std::min( MAX_GRID_SIZE, (num_rows + CTA_SIZE - 1) / CTA_SIZE);
    // All rows start uncolored (color 0).
    thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 );
    cudaCheckError();
    typedef typename Matrix_d::IVector IVector_d;
    cudaStream_t stream = thrust::global_thread_handle::get_stream();
    IVector_d d_new_color(1);
    cudaMemsetAsync(d_new_color.raw(), 0, sizeof(int), stream);
    this->m_num_colors = 1;
    IVector_d d_num_colors(1);
    IVector_d d_start_color(1); //for coloring safely without fallback
    cudaMemcpyAsync(d_num_colors.raw(), &this->m_num_colors, sizeof(int), cudaMemcpyHostToDevice, stream);
    int tmp = 0;
    cudaMemcpyAsync(d_start_color.raw(), &tmp, sizeof(int), cudaMemcpyHostToDevice, stream);
    IVector_d d_num_uncolored(1);
    IVector_d d_num_uncolored_block(MAX_GRID_SIZE);
    // h_done is host-pinned so the reduction kernel can flip it from the
    // device while the host polls it in the loop below.
    int *h_done = NULL;
    thrust::global_thread_handle::cudaMallocHost( (void **) &h_done, sizeof(int));
    IVector_d d_done(1);
    d_done[0] = 0;
    *h_done = 0;
    cudaEvent_t throttle_event = 0;
    cudaEventCreateWithFlags(&throttle_event, cudaEventDisableTiming);
    int iteration = 0;

    // Keep launching rounds until the device-side reducer signals done.
    while (h_done[0] == 0)
    {
#define PG_ARGS num_rows,\
    A.row_offsets.raw(),\
    A.col_indices.raw(),\
    d_num_colors.raw(),\
    d_new_color.raw(),\
    this->m_row_colors.raw(),\
    d_num_uncolored_block.raw(),\
    d_done.raw()

        if (this->m_coloring_level == 1)
        {
            color_kernel_thread<CTA_SIZE> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(PG_ARGS);
            count_uncolored_and_num_colors_kernel<MAX_GRID_SIZE, true> <<< 1, MAX_GRID_SIZE, 0, stream>>>(d_new_color.raw(), d_num_colors.raw(), max_uncolored_rows, d_num_uncolored_block.raw(), d_num_uncolored.raw(), d_done.raw(), h_done, d_start_color.raw(), GRID_SIZE);
        }
        else
        {
            // Ring level >= 2: recursive neighborhood kernels; start_color
            // lets greedy coloring continue past 64 colors without fallback.
            if (this->m_coloring_level == 2)
            {
                color_kernel_thread_anyring<CTA_SIZE, 2> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(PG_ARGS, d_start_color.raw());
            }
            else if (this->m_coloring_level == 3)
            {
                color_kernel_thread_anyring<CTA_SIZE, 3> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(PG_ARGS, d_start_color.raw());
            }
            else if (this->m_coloring_level == 4)
            {
                color_kernel_thread_anyring<CTA_SIZE, 4> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(PG_ARGS, d_start_color.raw());
            }
            else if (this->m_coloring_level == 5)
            {
                color_kernel_thread_anyring<CTA_SIZE, 5> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(PG_ARGS, d_start_color.raw());
            }

            count_uncolored_and_num_colors_kernel<MAX_GRID_SIZE, false> <<< 1, MAX_GRID_SIZE, 0, stream>>>(d_new_color.raw(), d_num_colors.raw(), max_uncolored_rows, d_num_uncolored_block.raw(), d_num_uncolored.raw(), d_done.raw(), h_done, d_start_color.raw(), GRID_SIZE);
        }

        // Throttle every 4 iteration
        // NOTE(review): cudaEventRecord is called without the stream argument,
        // i.e. it records on the default stream, not 'stream' -- confirm this
        // is the intended throttling behavior.
        if (iteration % 4 == 0)
        {
            cudaEventRecord(throttle_event);
        }
        else
        {
            cudaEventSynchronize(throttle_event);
        };

        iteration++;
        cudaCheckError();
    }

    typedef typename Matrix_h::IVector IVector_h;
    IVector_h new_color(1);
    cudaEventDestroy(throttle_event);
    this->m_num_colors = d_num_colors[0];
    // NOTE(review): this outer num_uncolored is shadowed by the loop-local one
    // below and never read.
    int num_uncolored = d_num_uncolored[0];
    int prev_num_uncolored = 0;

    // Fallback path if # colors exceeds 64, instead of picking the smallest unassigned color,
    // it sets its color to max neighbor color + 1
    if (this->m_num_colors == 64)
    {
        // Choose highest unassigned color among the neighbors
        for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows && prev_num_uncolored != num_uncolored ; )
        {
            prev_num_uncolored = num_uncolored;
            new_color[0] = 0;
            // Host to device transfer
            // NOTE(review): new_color is a host-side IVector_h whose raw
            // pointer is handed to a __global__ kernel -- this only works if
            // IVector_h is backed by pinned/mapped memory; verify.
            color_kernel_thread_fallback<CTA_SIZE> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_num_colors, thrust::raw_pointer_cast (&new_color.front()), this->m_row_colors.raw() );
            cudaCheckError();
            num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() );

            if (new_color[0]) { this->m_num_colors++; }

            cudaCheckError();
        }
    }

#if 0
    device_vector_alloc<int> error_found( 1, 0 );
    dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), NULL, thrust::raw_pointer_cast( &error_found.front() ) );
    cudaCheckError();

    if ( error_found[0] != 0 )
    {
        std::cout << "INVALID COLORING !!! Two neighbors have the same color!!!" << std::endl;
    }

#endif
    thrust::global_thread_handle::cudaFreeHost(h_done);
    A.setView(oldView);
}

#define AMGX_CASE_LINE(CASE) template class Parallel_Greedy_Matrix_Coloring_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

#define AMGX_CASE_LINE(CASE) template class Parallel_Greedy_Matrix_Coloring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

} // end namespace amgx
the_stack
// CurveTopology.cu
// Determines the topological (phase) relation between two closed curves:
// disjoint, curve1-inside-curve2, curve2-inside-curve1, or intersecting
// (with the number of intersection points).

#include "CurveTopology.h"
#include <stdio.h>
#include <iostream>
using namespace std;

// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions for the 2-D kernel launches below.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8

// Macros: IN_LABEL and OUT_LABEL
// Gray values used to mark pixels inside / outside a closed curve.
#define IN_LABEL 255
#define OUT_LABEL 0

// Kernel: _setCloseAreaKer (paint the interior of a closed curve white)
// Classic ray-casting point-in-polygon test: a pixel is inside the curve iff
// a ray shot from the pixel crosses the curve an odd number of times.  This
// implementation shoots a single ray straight down; the 'flag' state variable
// handles the degenerate cases (vertical runs of curve points and tangent
// points) so crossings are not double counted.  Pixels lying on the curve
// itself are labeled IN_LABEL but are NOT added to *count; the host adds the
// curve length afterwards.
static __global__ void        // no return value
_setCloseAreaKer(
        CurveCuda curve,      // input closed curve
        ImageCuda maskimg,    // output mask image (IN_LABEL / OUT_LABEL)
        int *count            // number of strictly interior pixels (atomic)
);

// Kernel: _matdotKer (dot product of two binary mask images)
// Treats both images as 0/1 matrices: each position where both pixels are
// non-zero contributes 1, accumulated atomically into *partial_sum.
static __global__ void        // no return value
_matdotKer(
        ImageCuda inimg1,     // first mask image
        ImageCuda inimg2,     // second mask image
        int *partial_sum      // dot-product accumulator
);

// Kernel: _intersecNumtKer (count the common points of two curves)
// One thread per point of curve1; each thread scans curve2 linearly and
// atomically increments *sectnum for every coordinate match.
static __global__ void        // no return value
_intersecNumtKer(
        CurveCuda curve1,     // first curve
        CurveCuda curve2,     // second curve
        int *sectnum          // intersection-point counter (atomic)
);

// Kernel: _setCloseAreaKer (paint the interior of a closed curve white)
static __global__ void _setCloseAreaKer(CurveCuda curve, ImageCuda maskimg,
                                        int *count)
{
    // Pixel handled by this thread.
    int xidx = blockIdx.x * blockDim.x + threadIdx.x;
    int yidx = blockIdx.y * blockDim.y + threadIdx.y;

    // Bounds check against the mask image.
    if (xidx >= maskimg.imgMeta.width || yidx >= maskimg.imgMeta.height)
        return;

    int downcount = 0;                          // crossings of the downward ray
    int length = curve.crvMeta.curveLength;     // number of points on the curve
    int outpitch = maskimg.pitchBytes;          // pitch of the mask image

    // Start by assuming the pixel is outside the curve.
    maskimg.imgMeta.imgData[yidx * outpitch + xidx] = OUT_LABEL;

    int flag = 0;   // non-zero while walking along a vertical run of the curve

    // Walk the whole curve and count crossings of the downward ray.
    for (int i = 0; i < length; i++) {
        int x = curve.crvMeta.crvData[2 * i];
        int y = curve.crvMeta.crvData[2 * i + 1];

        // Next point on the (closed) curve.
        int j = (i + 1) % length;
        int x2 = curve.crvMeta.crvData[2 * j];

        // Previous point on the curve.
        int k = (i - 1 + length) % length;
        int x3 = curve.crvMeta.crvData[2 * k];

        // Only curve points in the same column can hit the vertical ray.
        if (x == xidx) {
            if (y == yidx) {
                // The pixel lies on the curve; treat it as interior and stop.
                maskimg.imgMeta.imgData[yidx * outpitch + xidx] = IN_LABEL;
                return;
            }

            // The curve point is below the pixel, i.e. on the downward ray.
            if (y > yidx) {
                // If the next curve point is also on the ray we are entering
                // (or continuing) a vertical run: remember on which side the
                // run was entered via 'flag' instead of counting now.  When
                // the run ends (next point off the ray), decide: for a single
                // tangent point count only if the neighbors lie on opposite
                // sides; for a run count only if entry and exit sides differ.
                if (x2 == xidx) {
                    if (flag == 0)
                        flag = x3 - x;
                } else {
                    if (flag == 0) {
                        if ((x3 - x) * (x2 - x) <= 0)
                            downcount++;
                    } else {
                        if (flag * (x2 - x) < 0)
                            downcount++;
                        flag = 0;
                    }
                }
            }
        }
    }

    // Odd number of crossings => the pixel is strictly inside the curve.
    if (downcount % 2 == 1) {
        maskimg.imgMeta.imgData[yidx * outpitch + xidx] = IN_LABEL;
        atomicAdd(count, 1);
    }
}

// Kernel: _matdotKer (dot product of two binary mask images)
static __global__ void _matdotKer(ImageCuda inimg1, ImageCuda inimg2,
                                  int *partial_sum)
{
    // Pixel coordinates handled by this thread (c = column, r = row).
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;

    // Bounds check: saves work and prevents out-of-range accesses.
    if (c >= inimg1.imgMeta.width || r >= inimg1.imgMeta.height)
        return;

    // Linear index into the image data (both masks share the same layout).
    int inidx = r * inimg1.pitchBytes + c;

    // Both pixels non-zero => this position contributes 1 to the dot product.
    if (inimg1.imgMeta.imgData[inidx] && inimg2.imgMeta.imgData[inidx]) {
        atomicAdd(partial_sum, 1);
    }
}

// Kernel: _intersecNumtKer (count the common points of two curves)
static __global__ void _intersecNumtKer(CurveCuda curve1, CurveCuda curve2,
                                        int *sectnum)
{
    // Index of the curve1 point handled by this thread.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int length1 = curve1.crvMeta.curveLength;   // number of points on curve1

    // Bounds check against curve1.
    if (index >= length1)
        return;

    int length2 = curve2.crvMeta.curveLength;   // number of points on curve2

    // Coordinates of this thread's curve1 point.
    int x1 = curve1.crvMeta.crvData[2 * index];
    int y1 = curve1.crvMeta.crvData[2 * index + 1];

    int x2, y2;  // scratch for curve2 coordinates

    // Linear scan of curve2 looking for an identical coordinate pair.
    for (int i = 0; i < length2; i++) {
        x2 = curve2.crvMeta.crvData[2 * i];
        y2 = curve2.crvMeta.crvData[2 * i + 1];

        if ((x1 == x2) && (y1 == y2)) {
            atomicAdd(sectnum, 1);
        }
    }
}

// Macro: FREE_CURVE_TOPOLOGY (release locally allocated device/host memory)
// Cleans up everything curveTopology may have allocated; safe to invoke at
// any point because all pointers start out NULL.
#define FREE_CURVE_TOPOLOGY do {                 \
        if (maskimg1 != NULL)                    \
            ImageBasicOp::deleteImage(maskimg1); \
        if (maskimg2 != NULL)                    \
            ImageBasicOp::deleteImage(maskimg2); \
        if (temp_dev != NULL)                    \
            cudaFree(temp_dev);                  \
    } while (0)

// Host member method: curveTopology (phase relation between two curves)
__host__ int CurveTopology::curveTopology(Curve *curve1, Curve *curve2,
                                          CurveRelation *crvrelation,
                                          int width, int height)
{
    // Reject NULL inputs and NULL output.
    // (BUGFIX: the original code never checked crvrelation.)
    if (curve1 == NULL || curve2 == NULL || crvrelation == NULL)
        return NULL_POINTER;

    // Both curves must contain data and the work area must be non-empty.
    if (curve1->curveLength <= 0 || curve2->curveLength <= 0 ||
        width <= 0 || height <= 0)
        return INVALID_DATA;

    // Only closed curves have a well-defined interior.
    if (!curve1->closed || !curve2->closed)
        return INVALID_DATA;

    // Local error codes.
    int errcode;
    cudaError_t cuerrcode;

    // Copy both curves to the current device.
    errcode = CurveBasicOp::copyToCurrentDevice(curve1);
    if (errcode != NO_ERROR)
        return errcode;
    errcode = CurveBasicOp::copyToCurrentDevice(curve2);
    if (errcode != NO_ERROR)
        return errcode;

    // Device-side views of the curves.
    CurveCuda *curvecud1 = CURVE_CUDA(curve1);
    CurveCuda *curvecud2 = CURVE_CUDA(curve2);

    int count1;    // pixels enclosed by curve1 (incl. the curve itself)
    int count2;    // pixels enclosed by curve2 (incl. the curve itself)
    int result;    // dot product of the two masks
    int sectnum;   // number of intersection points

    // One device allocation holds all four scalar outputs.
    int *temp_dev = NULL;

    // Temporary mask images.
    Image *maskimg1 = NULL;
    Image *maskimg2 = NULL;

    // Allocate and zero the shared scalar buffer.
    cuerrcode = cudaMalloc((void **)&temp_dev, sizeof (int) * 4);
    if (cuerrcode != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }
    cuerrcode = cudaMemset(temp_dev, 0, sizeof (int) * 4);
    if (cuerrcode != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    // Slice the shared buffer into the four scalar outputs.
    int *dev_count1 = temp_dev;
    int *dev_count2 = dev_count1 + 1;
    int *dev_sum = dev_count2 + 1;
    int *dev_sectnum = dev_sum + 1;

    // Create mask image 1 on the device.
    // (BUGFIX: the original code discarded the return value of newImage,
    // tested a stale errcode, and leaked temp_dev on the early return.)
    errcode = ImageBasicOp::newImage(&maskimg1);
    if (errcode != NO_ERROR) {
        FREE_CURVE_TOPOLOGY;
        return errcode;
    }
    errcode = ImageBasicOp::makeAtCurrentDevice(maskimg1, width, height);
    if (errcode != NO_ERROR) {
        FREE_CURVE_TOPOLOGY;
        return errcode;
    }

    // Create mask image 2 on the device.
    errcode = ImageBasicOp::newImage(&maskimg2);
    if (errcode != NO_ERROR) {
        FREE_CURVE_TOPOLOGY;
        return errcode;
    }
    errcode = ImageBasicOp::makeAtCurrentDevice(maskimg2, width, height);
    if (errcode != NO_ERROR) {
        FREE_CURVE_TOPOLOGY;
        return errcode;
    }

    // Device-side views of the mask images.
    ImageCuda *maskimgcud1 = IMAGE_CUDA(maskimg1);
    ImageCuda *maskimgcud2 = IMAGE_CUDA(maskimg2);

    // Launch configuration for the 2-D kernels.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (height + blocksize.y - 1) / blocksize.y;

    // Build the interior mask of curve 1 and count the enclosed pixels.
    _setCloseAreaKer<<<gridsize, blocksize>>>(
            *curvecud1, *maskimgcud1, dev_count1);
    if (cudaGetLastError() != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    // Fetch count1 (cudaMemcpy implicitly synchronizes with the kernel).
    cuerrcode = cudaMemcpy(&count1, dev_count1, sizeof (int),
                           cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    // Include the points on the curve itself in the enclosed count.
    // (BUGFIX: the original code added the buffer capacity instead of the
    // actual number of curve points.)
    count1 += curve1->curveLength;

    // Build the interior mask of curve 2 and count the enclosed pixels.
    _setCloseAreaKer<<<gridsize, blocksize>>>(
            *curvecud2, *maskimgcud2, dev_count2);
    if (cudaGetLastError() != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    cuerrcode = cudaMemcpy(&count2, dev_count2, sizeof (int),
                           cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    // Include the points on curve 2 itself (same fix as above).
    count2 += curve2->curveLength;

    // Dot product of the two masks = size of the overlap region.
    _matdotKer<<<gridsize, blocksize>>>(*maskimgcud1, *maskimgcud2, dev_sum);
    if (cudaGetLastError() != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    cuerrcode = cudaMemcpy(&result, dev_sum, sizeof (int),
                           cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess) {
        FREE_CURVE_TOPOLOGY;
        return CUDA_ERROR;
    }

    // Classify the relation from the overlap size:
    //   0           => no containment or intersection (other relation)
    //   == count1   => curve1 lies entirely inside curve2
    //   == count2   => curve2 lies entirely inside curve1
    //   otherwise   => the curves intersect
    if (result == 0) {
        crvrelation->relation = CURVE_OTHERSHIP;
        crvrelation->internum = 0;
    } else if (result == count1) {
        crvrelation->relation = CURVE_INCLUDED;
        crvrelation->internum = 0;
    } else if (result == count2) {
        crvrelation->relation = CURVE_INCLUDE;
        crvrelation->internum = 0;
    } else {
        crvrelation->relation = CURVE_INTERSECT;
    }

    // For intersecting curves, count the intersection points.
    if (crvrelation->relation == CURVE_INTERSECT) {
        // 1-D launch configuration over the points of curve 1.
        // (BUGFIX: grid size now derived from curveLength, not capacity.)
        int blocksize1, gridsize1;
        blocksize1 = DEF_BLOCK_X * DEF_BLOCK_Y;
        gridsize1 = (curve1->curveLength + blocksize1 - 1) / blocksize1;

        _intersecNumtKer<<<gridsize1, blocksize1>>>(
                *curvecud1, *curvecud2, dev_sectnum);
        if (cudaGetLastError() != cudaSuccess) {
            FREE_CURVE_TOPOLOGY;
            return CUDA_ERROR;
        }

        cuerrcode = cudaMemcpy(&sectnum, dev_sectnum, sizeof (int),
                               cudaMemcpyDeviceToHost);
        if (cuerrcode != cudaSuccess) {
            FREE_CURVE_TOPOLOGY;
            return CUDA_ERROR;
        }

        crvrelation->internum = sectnum;
    }

    // Release all temporaries and report success.
    FREE_CURVE_TOPOLOGY;
    return NO_ERROR;
}
the_stack
#include <numeric>
#include <vector>

#include "k2/csrc/fsa.h"
#include "k2/csrc/fsa_utils.h"

namespace k2 {

// Arc equality for tests: exact match on states and symbol, tolerant (1e-6)
// comparison on the floating-point score.
// clang-format off
bool operator==(const Arc &a, const Arc &b) {
  return a.src_state == b.src_state && \
         a.dest_state == b.dest_state && \
         a.symbol == b.symbol && \
         fabs(a.score - b.score) < 1e-6;
}
// clang-format on

// Parses a k2-format acceptor (one arc per line; final state on its own
// line) and checks shape and arc contents.
TEST(FsaFromString, K2Acceptor) {
  // src_state dst_state label cost
  std::string s = R"(0 1 2 -1.2
0 2 10 -2.2
1 3 3 -3.2
1 6 -1 -4.2
2 6 -1 -5.2
2 4 2 -6.2
3 6 -1 -7.2
5 0 1 -8.2
6
)";
  {
    auto fsa = FsaFromString(s);
    EXPECT_EQ(fsa.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(fsa.NumAxes(), 2);
    EXPECT_EQ(fsa.shape.Dim0(), 7);         // there are 7 states
    EXPECT_EQ(fsa.shape.NumElements(), 8);  // there are 8 arcs
    // Arc sorting order: src_state, symbol, dest_state, score.
    EXPECT_EQ((fsa[{0, 0}]), (Arc{0, 1, 2, -1.2f}));
    EXPECT_EQ((fsa[{0, 1}]), (Arc{0, 2, 10, -2.2f}));
    EXPECT_EQ((fsa[{0, 2}]), (Arc{1, 3, 3, -3.2f}));
    EXPECT_EQ((fsa[{0, 3}]), (Arc{1, 6, -1, -4.2f}));
    EXPECT_EQ((fsa[{0, 4}]), (Arc{2, 6, -1, -5.2f}));
    EXPECT_EQ((fsa[{0, 5}]), (Arc{2, 4, 2, -6.2f}));
    EXPECT_EQ((fsa[{0, 6}]), (Arc{3, 6, -1, -7.2f}));
    EXPECT_EQ((fsa[{0, 7}]), (Arc{5, 0, 1, -8.2f}));
  }
}

// Parses an OpenFst-format acceptor (final states carry their own cost;
// scores are negated on input) and checks the resulting FSA, including the
// extra super-final state and the arcs to it.
TEST(FsaFromString, OpenFstAcceptor) {
  // src_state dst_state label cost
  std::string s = R"(0 1 2 -1.2
0 2 10 -2.2
1 3 3 -3.2
1 6 4 -4.2
2 6 5 -5.2
3 6 7 -7.2
2 4 2 -6.2
5 7 1 -8.2
7 -2.3
6 -1.2
)";
  {
    auto fsa = FsaFromString(s, true);
    EXPECT_EQ(fsa.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(fsa.NumAxes(), 2);
    EXPECT_EQ(fsa.shape.Dim0(), 9);          // there are 9 states
    EXPECT_EQ(fsa.shape.NumElements(), 10);  // there are 10 arcs
    // Arc sorting order: src_state, symbol, dest_state, score.
    EXPECT_EQ((fsa[{0, 0}]), (Arc{0, 1, 2, 1.2f}));
    EXPECT_EQ((fsa[{0, 1}]), (Arc{0, 2, 10, 2.2f}));
    EXPECT_EQ((fsa[{0, 2}]), (Arc{1, 3, 3, 3.2f}));
    EXPECT_EQ((fsa[{0, 3}]), (Arc{1, 6, 4, 4.2f}));
    EXPECT_EQ((fsa[{0, 4}]), (Arc{2, 6, 5, 5.2f}));
    EXPECT_EQ((fsa[{0, 5}]), (Arc{2, 4, 2, 6.2f}));
    EXPECT_EQ((fsa[{0, 6}]), (Arc{3, 6, 7, 7.2f}));
    EXPECT_EQ((fsa[{0, 7}]), (Arc{5, 7, 1, 8.2f}));
    EXPECT_EQ((fsa[{0, 8}]), (Arc{6, 8, -1, 1.2f}));
    EXPECT_EQ((fsa[{0, 9}]), (Arc{7, 8, -1, 2.3f}));
  }
}

// Parses a k2-format transducer (each arc also carries an aux_label) and
// checks both the FSA and the returned aux_labels array.
TEST(FsaFromString, K2Transducer) {
  // src_state dst_state label aux_label cost
  std::string s = R"(0 1 2 22 -1.2
0 2 10 100 -2.2
1 3 3 33 -3.2
1 6 -1 16 -4.2
2 6 -1 26 -5.2
2 4 2 22 -6.2
3 6 -1 36 -7.2
5 0 1 50 -8.2
6
)";
  {
    Array1<int32_t> aux_labels;
    auto fsa = FsaFromString(s, false, &aux_labels);
    EXPECT_EQ(fsa.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(aux_labels.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(fsa.NumAxes(), 2);
    EXPECT_EQ(fsa.shape.Dim0(), 7);         // there are 7 states
    EXPECT_EQ(fsa.shape.NumElements(), 8);  // there are 8 arcs
    EXPECT_EQ((fsa[{0, 0}]), (Arc{0, 1, 2, -1.2f}));
    EXPECT_EQ((fsa[{0, 1}]), (Arc{0, 2, 10, -2.2f}));
    EXPECT_EQ((fsa[{0, 2}]), (Arc{1, 3, 3, -3.2f}));
    EXPECT_EQ((fsa[{0, 3}]), (Arc{1, 6, -1, -4.2f}));
    EXPECT_EQ((fsa[{0, 4}]), (Arc{2, 6, -1, -5.2f}));
    EXPECT_EQ((fsa[{0, 5}]), (Arc{2, 4, 2, -6.2f}));
    EXPECT_EQ((fsa[{0, 6}]), (Arc{3, 6, -1, -7.2f}));
    EXPECT_EQ((fsa[{0, 7}]), (Arc{5, 0, 1, -8.2f}));
    EXPECT_EQ(aux_labels[0], 22);
    EXPECT_EQ(aux_labels[1], 100);
    EXPECT_EQ(aux_labels[2], 33);
    EXPECT_EQ(aux_labels[3], 16);
    EXPECT_EQ(aux_labels[4], 26);
    EXPECT_EQ(aux_labels[5], 22);
    EXPECT_EQ(aux_labels[6], 36);
    EXPECT_EQ(aux_labels[7], 50);
  }
}

// Parses an OpenFst-format transducer; arcs to the super-final state get
// aux_label 0 (checked at indexes 8 and 9).
TEST(FsaFromString, OpenFstTransducer) {
  // src_state dst_state label aux_label cost
  std::string s = R"(0 1 2 22 -1.2
0 2 10 100 -2.2
1 3 3 33 -3.2
1 6 4 16 -4.2
6 -1.2
2 6 5 26 -5.2
3 6 7 36 -7.2
2 4 2 22 -6.2
5 7 1 50 -8.2
7 -2.3
)";
  {
    Array1<int32_t> aux_labels;
    auto fsa = FsaFromString(s, true, &aux_labels);
    EXPECT_EQ(fsa.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(aux_labels.Context()->GetDeviceType(), kCpu);
    EXPECT_EQ(fsa.NumAxes(), 2);
    EXPECT_EQ(fsa.shape.Dim0(), 9);          // there are 9 states
    EXPECT_EQ(fsa.shape.NumElements(), 10);  // there are 10 arcs
    EXPECT_EQ((fsa[{0, 0}]), (Arc{0, 1, 2, 1.2f}));
    EXPECT_EQ((fsa[{0, 1}]), (Arc{0, 2, 10, 2.2f}));
    EXPECT_EQ((fsa[{0, 2}]), (Arc{1, 3, 3, 3.2f}));
    EXPECT_EQ((fsa[{0, 3}]), (Arc{1, 6, 4, 4.2f}));
    EXPECT_EQ((fsa[{0, 4}]), (Arc{2, 6, 5, 5.2f}));
    EXPECT_EQ((fsa[{0, 5}]), (Arc{2, 4, 2, 6.2f}));
    EXPECT_EQ((fsa[{0, 6}]), (Arc{3, 6, 7, 7.2f}));
    EXPECT_EQ((fsa[{0, 7}]), (Arc{5, 7, 1, 8.2f}));
    EXPECT_EQ((fsa[{0, 8}]), (Arc{6, 8, -1, 1.2f}));
    EXPECT_EQ((fsa[{0, 9}]), (Arc{7, 8, -1, 2.3f}));
    EXPECT_EQ(aux_labels[0], 22);
    EXPECT_EQ(aux_labels[1], 100);
    EXPECT_EQ(aux_labels[2], 33);
    EXPECT_EQ(aux_labels[3], 16);
    EXPECT_EQ(aux_labels[4], 26);
    EXPECT_EQ(aux_labels[5], 22);
    EXPECT_EQ(aux_labels[6], 36);
    EXPECT_EQ(aux_labels[7], 50);
    EXPECT_EQ(aux_labels[8], 0);
    EXPECT_EQ(aux_labels[9], 0);
  }
}

// TODO(fangjun): write code to check the printed
// strings matching expected ones.
// Prints an acceptor FSA both as-is and with scores negated; the output is
// only logged, not asserted on.
TEST(FsaToString, Acceptor) {
  // src_state dst_state label cost
  std::string s = R"(0 1 2 -1.2
0 2 10 -2.2
1 5 -1 -3.2
5
)";
  auto fsa = FsaFromString(s);
  auto str = FsaToString(fsa);
  K2_LOG(INFO) << "\n" << str;

  str = FsaToString(fsa, true);
  K2_LOG(INFO) << "\n---negating---\n" << str;
}

// Same as above but for a transducer with aux_labels.
TEST(FsaToString, Transducer) {
  // src_state dst_state label aux_label cost
  std::string s = R"(0 1 2 100 -1.2
0 2 10 200 -2.2
1 5 -1 300 -3.2
5
)";
  Array1<int32_t> aux_labels;
  auto fsa = FsaFromString(s, false, &aux_labels);
  auto str = FsaToString(fsa, false, &aux_labels);
  K2_LOG(INFO) << "\n" << str;

  str = FsaToString(fsa, true, &aux_labels);
  K2_LOG(INFO) << "\n---negating---\n" << str;
}

// Checks GetDestStates on a 2-FSA FsaVec, both with arc-local state ids
// (as_idx01 = false) and with global idx01 state ids (as_idx01 = true).
// Templated on DeviceType so the same test runs on CPU and CUDA.
template <DeviceType d>
void TestGetDestStates() {
  ContextPtr cpu = GetCpuContext();  // will use to copy data
  ContextPtr context = nullptr;
  if (d == kCpu) {
    context = GetCpuContext();
  } else {
    K2_CHECK_EQ(d, kCuda);
    context = GetCudaContext();
  }

  // test with simple case should be good enough
  std::string s1 = R"(0 1 1 0
0 2 1 0
0 3 1 0
0 3 2 0
1 2 1 0
1 3 1 0
3 4 1 0
3 5 -1 0
4 5 -1 0
5
)";

  std::string s2 = R"(0 1 1 0
0 2 1 0
1 2 1 0
1 3 1 0
2 3 1 0
2 4 -1 0
4
)";

  Fsa fsa1 = FsaFromString(s1);
  Fsa fsa2 = FsaFromString(s2);
  Fsa *fsa_array[] = {&fsa1, &fsa2};
  FsaVec fsa_vec = CreateFsaVec(2, &fsa_array[0]);
  fsa_vec = fsa_vec.To(context);

  {
    // as_idx01 = false
    Array1<int32_t> result = GetDestStates(fsa_vec, false);
    ASSERT_EQ(result.Dim(), fsa_vec.NumElements());
    result = result.To(cpu);
    std::vector<int32_t> cpu_data(result.Data(), result.Data() + result.Dim());
    EXPECT_THAT(cpu_data, ::testing::ElementsAre(1, 2, 3, 3, 2, 3, 4, 5, 5, 1,
                                                 2, 2, 3, 3, 4));
  }

  {
    // as_idx01 = true
    Array1<int32_t> result = GetDestStates(fsa_vec, true);
    ASSERT_EQ(result.Dim(), fsa_vec.NumElements());
    result = result.To(cpu);
    std::vector<int32_t> cpu_data(result.Data(), result.Data() + result.Dim());
    EXPECT_THAT(cpu_data, ::testing::ElementsAre(1, 2, 3, 3, 2, 3, 4, 5, 5, 7,
                                                 8, 8, 9, 9, 10));
  }
}

TEST(FsaUtilsTest, TestGetDestStates) {
  TestGetDestStates<kCpu>();
  TestGetDestStates<kCuda>();
}

// Checks GetStateBatches on a 3-FSA FsaVec in both layouts:
// [fsa][batch][state] (no transpose) and [batch][fsa][state] (transpose).
// Templated on DeviceType so the same test runs on CPU and CUDA.
template <DeviceType d>
void TestGetStateBatches() {
  ContextPtr cpu = GetCpuContext();  // will use to copy data
  ContextPtr context = nullptr;
  if (d == kCpu) {
    context = GetCpuContext();
  } else {
    K2_CHECK_EQ(d, kCuda);
    context = GetCudaContext();
  }

  {
    // simple case
    std::string s1 = R"(0 1 1 0
0 2 1 0
0 3 1 0
0 3 2 0
1 2 1 0
1 3 1 0
3 4 1 0
3 5 -1 0
4 5 -1 0
5
)";

    std::string s2 = R"(0 1 1 0
0 2 1 0
1 2 1 0
1 3 1 0
2 3 1 0
2 4 -1 0
4
)";

    std::string s3 = R"(0 2 1 0
1 2 1 0
1 3 1 0
1 4 1 0
2 3 1 0
2 4 1 0
3 4 1 0
4 5 -1 0
5
)";

    Fsa fsa1 = FsaFromString(s1);
    Fsa fsa2 = FsaFromString(s2);
    Fsa fsa3 = FsaFromString(s3);
    std::vector<int32_t> states_num = {fsa1.Dim0(), fsa2.Dim0(), fsa3.Dim0()};
    Fsa *fsa_array[] = {&fsa1, &fsa2, &fsa3};
    FsaVec fsa_vec = CreateFsaVec(3, &fsa_array[0]);
    fsa_vec = fsa_vec.To(context);
    int32_t num_fsas = fsa_vec.Dim0(), num_states = fsa_vec.TotSize(1);
    EXPECT_EQ(num_fsas, 3);

    {
      // no transpose: [fsa_idx][batch_idx][state]
      Ragged<int32_t> result = GetStateBatches(fsa_vec, false);
      result = result.To(cpu);
      EXPECT_EQ(result.Dim0(), num_fsas);
      ASSERT_EQ(result.NumElements(), num_states);
      int32_t *row_splits1_data = result.RowSplits(1).Data();
      for (int32_t n = 0; n < num_fsas; ++n) {
        int32_t num_batches = row_splits1_data[n + 1] - row_splits1_data[n];
        // num-batches in each fsa should not be greater num-states
        EXPECT_LE(num_batches, states_num[n]);
        if (states_num[n] > 0) {
          EXPECT_GT(num_batches, 0);
        }
      }
      // check values
      std::vector<int32_t> states(num_states);
      std::iota(states.begin(), states.end(), 0);
      Array1<int32_t> values = result.values;
      ASSERT_EQ(values.Dim(), num_states);
      std::vector<int32_t> cpu_values(values.Data(),
                                      values.Data() + values.Dim());
      EXPECT_EQ(cpu_values, states);
    }

    {
      // transpose: [batch_index][fsa_index][state]
      Ragged<int32_t> result = GetStateBatches(fsa_vec, true);
      result = result.To(cpu);
      // result.Dim0() is num-batches
      EXPECT_EQ(result.TotSize(1), num_fsas * result.Dim0());
      ASSERT_EQ(result.NumElements(), num_states);
      int32_t *row_splits1_data = result.RowSplits(1).Data();
      for (int32_t n = 0; n <= result.Dim0(); ++n) {
        EXPECT_EQ(row_splits1_data[n], n * num_fsas);
      }
    }
  }

  // TODO(haowen): add random cases
}

TEST(FsaUtilsTest, TestGetStateBatches) {
  TestGetStateBatches<kCpu>();
  TestGetStateBatches<kCuda>();
}

}  // namespace k2
the_stack
#pragma once

/**
 * @brief Graph slice structure which contains common graph structural data.
 *
 * @tparam SizeT    Type of unsigned integer to use for array indexing. (e.g.,
 *                  uint32)
 * @tparam VertexId Type of signed integer to use as vertex id (e.g., uint32)
 * @tparam Value    Type to use as vertex / edge associated values
 */
template <typename VertexId, typename SizeT, typename Value>
struct GraphSlice {
  int num_gpus;         // Number of GPUs
  int index;            // Slice index
  VertexId nodes;       // Number of nodes in slice
  SizeT edges;          // Number of edges in slice
  SizeT inverse_edges;  // Number of inverse_edges in slice

  Csr<VertexId, SizeT, Value>* graph;  // Pointer to CSR format subgraph

  util::Array1D<SizeT, SizeT> row_offsets;        // CSR format row offset
  util::Array1D<SizeT, VertexId> column_indices;  // CSR format column indices
  util::Array1D<SizeT, SizeT> out_degrees;        // Out-degree of each vertex
  util::Array1D<SizeT, SizeT> column_offsets;     // CSR format column offset
  util::Array1D<SizeT, VertexId> row_indices;     // CSR format row indices
  util::Array1D<SizeT, SizeT> in_degrees;         // In-degree of each vertex
  util::Array1D<SizeT, int>
      partition_table;  // Partition number for vertices, local is always 0
  util::Array1D<SizeT, VertexId>
      convertion_table;  // IDs of vertices in their hosting partition
  util::Array1D<SizeT, VertexId> original_vertex;  // Original IDs of vertices
  util::Array1D<SizeT, SizeT>
      in_counter;  // Incoming vertices counter from peers
  util::Array1D<SizeT, SizeT> out_offset;   // Outgoing vertices offsets
  util::Array1D<SizeT, SizeT> out_counter;  // Outgoing vertices counter
  util::Array1D<SizeT, SizeT>
      backward_offset;  // Backward offsets for partition and conversion tables
  util::Array1D<SizeT, int>
      backward_partition;  // Remote peers having the same vertices
  util::Array1D<SizeT, VertexId>
      backward_convertion;  // IDs of vertices in remote peers

  /**
   * @brief GraphSlice Constructor
   *
   * @param[in] index GPU index.
   */
  GraphSlice(int index)
      // BUGFIX: the original initializer list left inverse_edges
      // uninitialized and did not follow declaration order (-Wreorder).
      : num_gpus(0),
        index(index),
        nodes(0),
        edges(0),
        inverse_edges(0),
        graph(NULL) {
    // Name each array for diagnostics / error reporting.
    row_offsets.SetName("row_offsets");
    column_indices.SetName("column_indices");
    out_degrees.SetName("out_degrees");
    column_offsets.SetName("column_offsets");
    row_indices.SetName("row_indices");
    in_degrees.SetName("in_degrees");
    partition_table.SetName("partition_table");
    convertion_table.SetName("convertion_table");
    original_vertex.SetName("original_vertex");
    in_counter.SetName("in_counter");
    out_offset.SetName("out_offset");
    out_counter.SetName("out_counter");
    backward_offset.SetName("backward_offset");
    backward_partition.SetName("backward_partition");
    backward_convertion.SetName("backward_convertion");
  }  // end GraphSlice(int index)

  /**
   * @brief GraphSlice Destructor to free all device memories.
   */
  virtual ~GraphSlice() { Release(); }

  /**
   * @brief Releases all host / device memory owned by this slice.
   *
   * \return cudaError_t indicating whether all releases succeeded.
   */
  cudaError_t Release() {
    cudaError_t retval = cudaSuccess;

    // Set device (use slice index)
    if (retval = util::SetDevice(index)) return retval;

    // Release allocated host / device memory
    if (retval = row_offsets.Release()) return retval;
    if (retval = column_indices.Release()) return retval;
    if (retval = out_degrees.Release()) return retval;
    if (retval = column_offsets.Release()) return retval;
    if (retval = row_indices.Release()) return retval;
    if (retval = in_degrees.Release()) return retval;
    if (retval = partition_table.Release()) return retval;
    if (retval = convertion_table.Release()) return retval;
    if (retval = original_vertex.Release()) return retval;
    if (retval = in_counter.Release()) return retval;
    if (retval = out_offset.Release()) return retval;
    if (retval = out_counter.Release()) return retval;
    if (retval = backward_offset.Release()) return retval;
    if (retval = backward_partition.Release()) return retval;
    if (retval = backward_convertion.Release()) return retval;

    return retval;
  }  // end ~GraphSlice()

  /**
   * @brief Initialize graph slice
   *
   * @param[in] stream_from_host Whether to stream data from host
   * @param[in] num_gpus Number of GPUs
   * @param[in] graph Pointer to the sub graph
   * @param[in] inverstgraph Pointer to the invert graph
   * @param[in] partition_table The partition table
   * @param[in] convertion_table The conversion table
   * @param[in] original_vertex The original vertex table
   * @param[in] in_counter In_counters
   * @param[in] out_offset Out_offsets
   * @param[in] out_counter Out_counters
   * @param[in] backward_offsets Backward_offsets
   * @param[in] backward_partition The backward partition table
   * @param[in] backward_convertion The backward conversion table
   *
   * \return cudaError_t Object indicating the success of all CUDA
   * function calls
   */
  cudaError_t Init(bool stream_from_host, int num_gpus,
                   Csr<VertexId, SizeT, Value>* graph,
                   Csr<VertexId, SizeT, Value>* inverstgraph,
                   int* partition_table, VertexId* convertion_table,
                   VertexId* original_vertex, SizeT* in_counter,
                   SizeT* out_offset, SizeT* out_counter,
                   SizeT* backward_offsets = NULL,
                   int* backward_partition = NULL,
                   VertexId* backward_convertion = NULL) {
    cudaError_t retval = cudaSuccess;

    // Set local variables / array pointers
    this->num_gpus = num_gpus;
    this->graph = graph;
    this->nodes = graph->nodes;
    this->edges = graph->edges;
    if (inverstgraph != NULL)
      this->inverse_edges = inverstgraph->edges;
    else
      this->inverse_edges = 0;
    if (partition_table != NULL)
      this->partition_table.SetPointer(partition_table, nodes);
    if (convertion_table != NULL)
      this->convertion_table.SetPointer(convertion_table, nodes);
    if (original_vertex != NULL)
      this->original_vertex.SetPointer(original_vertex, nodes);
    if (in_counter != NULL)
      this->in_counter.SetPointer(in_counter, num_gpus + 1);
    if (out_offset != NULL)
      this->out_offset.SetPointer(out_offset, num_gpus + 1);
    if (out_counter != NULL)
      this->out_counter.SetPointer(out_counter, num_gpus + 1);
    this->row_offsets.SetPointer(graph->row_offsets, nodes + 1);
    this->column_indices.SetPointer(graph->column_indices, edges);
    if (inverstgraph != NULL) {
      this->column_offsets.SetPointer(inverstgraph->row_offsets, nodes + 1);
      this->row_indices.SetPointer(inverstgraph->column_indices,
                                   inverstgraph->edges);
    }

    // Set device using slice index
    if (retval = util::SetDevice(index)) return retval;

    // Allocate and initialize row_offsets
    if (retval = this->row_offsets.Allocate(nodes + 1, util::DEVICE))
      return retval;
    if (retval = this->row_offsets.Move(util::HOST, util::DEVICE))
      return retval;

    // Allocate and initialize column_indices
    if (retval = this->column_indices.Allocate(edges, util::DEVICE))
      return retval;
    if (retval = this->column_indices.Move(util::HOST, util::DEVICE))
      return retval;

    // Allocate out degrees for each node
    if (retval = this->out_degrees.Allocate(nodes, util::DEVICE))
      return retval;
    // count number of out-going degrees for each node
    util::MemsetMadVectorKernel<<<128, 128>>>(
        this->out_degrees.GetPointer(util::DEVICE),
        this->row_offsets.GetPointer(util::DEVICE),
        this->row_offsets.GetPointer(util::DEVICE) + 1, (SizeT)-1, nodes);

    if (inverstgraph != NULL) {
      // Allocate and initialize column_offsets
      if (retval = this->column_offsets.Allocate(nodes + 1, util::DEVICE))
        return retval;
      if (retval = this->column_offsets.Move(util::HOST, util::DEVICE))
        return retval;

      // Allocate and initialize row_indices
      if (retval =
              this->row_indices.Allocate(inverstgraph->edges, util::DEVICE))
        return retval;
      if (retval = this->row_indices.Move(util::HOST, util::DEVICE))
        return retval;

      if (retval = this->in_degrees.Allocate(nodes, util::DEVICE))
        return retval;
      // count number of in-going degrees for each node
      util::MemsetMadVectorKernel<<<128, 128>>>(
          this->in_degrees.GetPointer(util::DEVICE),
          this->column_offsets.GetPointer(util::DEVICE),
          this->column_offsets.GetPointer(util::DEVICE) + 1, (SizeT)-1, nodes);
    }

    // For multi-GPU cases
    if (num_gpus > 1) {
      // Allocate and initialize partition_table
      if (retval = this->partition_table.Allocate(nodes, util::DEVICE))
        return retval;
      if (partition_table != NULL)
        if (retval = this->partition_table.Move(util::HOST, util::DEVICE))
          return retval;

      // Allocate and initialize convertion_table
      if (retval = this->convertion_table.Allocate(nodes, util::DEVICE))
        return retval;
      if (convertion_table != NULL)
        if (retval = this->convertion_table.Move(util::HOST, util::DEVICE))
          return retval;

      // Allocate and initialize original_vertex
      if (retval = this->original_vertex.Allocate(nodes, util::DEVICE))
        return retval;
      if (original_vertex != NULL)
        if (retval = this->original_vertex.Move(util::HOST, util::DEVICE))
          return retval;

      // If need backward information proration
      if (backward_offsets != NULL) {
        // Allocate and initialize backward_offset
        this->backward_offset.SetPointer(backward_offsets, nodes + 1);
        if (retval = this->backward_offset.Allocate(nodes + 1, util::DEVICE))
          return retval;
        if (retval = this->backward_offset.Move(util::HOST, util::DEVICE))
          return retval;

        // Allocate and initialize backward_partition
        this->backward_partition.SetPointer(backward_partition,
                                            backward_offsets[nodes]);
        if (retval = this->backward_partition.Allocate(backward_offsets[nodes],
                                                       util::DEVICE))
          return retval;
        if (retval = this->backward_partition.Move(util::HOST, util::DEVICE))
          return retval;

        // Allocate and initialize backward_convertion
        this->backward_convertion.SetPointer(backward_convertion,
                                             backward_offsets[nodes]);
        if (retval = this->backward_convertion.Allocate(
                backward_offsets[nodes], util::DEVICE))
          return retval;
        if (retval = this->backward_convertion.Move(util::HOST, util::DEVICE))
          return retval;
      }
    }  // end if num_gpu>1

    return retval;
  }  // end of Init(...)
/** * @brief overloaded = operator * * @param[in] other GraphSlice to copy from * * \return GraphSlice& a copy of local GraphSlice */ GraphSlice& operator=(GraphSlice other) { num_gpus = other.num_gpus; index = other.index; nodes = other.nodes; edges = other.edges; graph = other.graph; row_offsets = other.row_offsets; column_indices = other.column_indices; column_offsets = other.column_offsets; row_indices = other.row_indices; partition_table = other.partition_table; convertion_table = other.convertion_table; original_vertex = other.original_vertex; in_counter = other.in_counter; out_offset = other.out_offset; out_counter = other.out_counter; backward_offset = other.backward_offset; backward_partition = other.backward_partition; backward_convertion = other.backward_convertion; return *this; } // end operator=() }; // end GraphSlice } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
the_stack
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>

namespace at { namespace native {

namespace {

// Device-side isinf helpers. Half/BFloat16 have no native ::isinf overload,
// so they are widened to float first.
template <typename T>
inline __device__ bool _isinf(T x) { return ::isinf(x); }

inline __device__ bool _isinf(c10::Half x) {
  return ::isinf(static_cast<float>(x));
}
inline __device__ bool _isinf(c10::BFloat16 x) {
  return ::isinf(static_cast<float>(x));
}

#define MAX_NUM_BLOCKS 200

// Normalizes the L1 norm of every row to 1; used by multinomial.
// Launch layout: one block per row (grid-stride over rows), threads stride
// over columns; dynamic shared memory holds one scalar_t per warp for the
// block reduction.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
  extern __shared__ unsigned char my_smem[];
  scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
  scalar_t zero = static_cast<scalar_t>(0);
  scalar_t val;
  for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
    scalar_t sum = static_cast<scalar_t>(0);
    for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
      val = dist[row * cols + col];
      CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
      sum = sum + val;
    }

    sum = cuda_utils::BlockReduceSum(sum, smem);
    if (threadIdx.x == 0) {
      CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
      smem[0] = sum;
    }
    __syncthreads();

    // Broadcast the row sum to all threads, then divide in place.
    sum = smem[0];
    if (sum > zero) {
      for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
        dist[row * cols + col] = dist[row * cols + col] / sum;
      }
    }
  }
}

// Host launcher for renormRowsL1: normalizes each row of a 2-D tensor `t`
// in place so the row sums to 1 (rows that sum to zero are left unchanged).
void renormRows(Tensor& t) {
  TORCH_CHECK(t.dim() == 2);
  int64_t rows = t.size(0);
  int64_t cols = t.size(1);

  auto props = at::cuda::getCurrentDeviceProperties();
  CUDA_KERNEL_ASSERT(props != NULL);
  int numSM = props->multiProcessorCount;
  const int64_t maxThreads = std::min(
      props->maxThreadsPerBlock, cuda_utils::kCUDABlockReduceMaxThreads);

  // Cap the grid at 4 blocks per SM; block size is the column count rounded
  // up to a whole number of warps, clamped to the device/reduction limit.
  dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
  dim3 block(std::min(maxThreads, C10_WARP_SIZE * ceil_div(cols, int64_t{C10_WARP_SIZE})));

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
    renormRowsL1<scalar_t>
        <<<grid, block, (block.x / C10_WARP_SIZE) * sizeof(scalar_t),
        at::cuda::getCurrentCUDAStream()>>>(t.data_ptr<scalar_t>(),
            rows, cols);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  });
}

// Finds the index of the prefix-sum bucket containing `val` via binary
// search on `cumdist` (inclusive prefix sums of `dist`), then walks back
// over trailing zero-probability categories so the returned index always
// has non-zero mass (except index 0, which is returned as a last resort).
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
                                          scalar_t* dist,
                                          int size,
                                          scalar_t val) {
  int start = 0;
  int end = size;
  // cumdist[size - 1] = 0 => all zero prob dist
  CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));

  while (end - start > 0) {
    int mid = start + (end - start) / 2;

    scalar_t midVal = cumdist[mid];
    if (midVal < val) {
      start = mid + 1;
    } else {
      end = mid;
    }
  }

  if (start == size) {
    // No probability mass or precision problems; just return the
    // first non-zero element by setting start to size-1 here,
    // the code below will move it to the last non-zero probability
    // this actually can happen when the random number is 1
    // (github pytorch issue #4858).
    start = size - 1;
  }

  while(start >= 1 && dist[start] == 0) start--;

  return start;
}

// Draws `totalSamples` categorical samples per distribution, with
// replacement. Grid layout: blockIdx.y strides over distributions,
// blockIdx.x * blockDim.x + threadIdx.x strides over samples. Each thread
// owns an independent Philox RNG stream seeded from `philox_args`.
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(PhiloxCudaState philox_args,
                                 int totalSamples,
                                 int64_t* dest,
                                 int64_t distributions,
                                 int categories,
                                 scalar_t* normDistPrefixSum,
                                 scalar_t* normDist) {
  // At the moment, each warp computes one sample value in the binary
  // search due to divergence. It seems possible to compute multiple
  // values and limit divergence though later on.
  auto seeds = at::cuda::philox::unpack(philox_args);

  // global index formula for 2D grid of 1D blocks
  int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;

  curandStatePhilox4_32_10_t state;
  curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state);

  // The block determines the distribution for which we generate a point
  for (int64_t curDist = blockIdx.y;
       curDist < distributions;
       curDist += gridDim.y) {
    for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
         sample < totalSamples; sample += blockDim.x*gridDim.x) {

      //we are losing 3 out of 4 generated numbers but it's ok
      //this kernel is not very efficient anyway
      auto rand = curand_uniform4(&state);
      scalar_t r = static_cast<scalar_t>(rand.x);

      // Find the bucket that a uniform sample lies in
      int choice = binarySearchForMultinomial<scalar_t>(
          normDistPrefixSum + curDist * categories,
          normDist + curDist * categories,
          categories,
          r);

      dest[curDist * totalSamples + sample] = choice;
    }
  }
}

// Allocation-free single-sample path: one block per distribution. The block
// first reduces the (unnormalized) distribution to get its sum, then scans
// the normalized probabilities chunk by chunk in shared memory until the
// pre-generated uniform sample `sampled[curDist]` falls into a bucket.
// Requires blockDim.x * sizeof(accscalar_t) bytes of dynamic shared memory.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void sampleMultinomialOnce(
    int64_t* dest,
    int64_t distributions,
    int categories,
    scalar_t* sampled,
    scalar_t* dist,
    int stride_dist, // dist->stride(0)
    int stride_categories // dist->stride(1)
) {
  extern __shared__ unsigned char my_smem[];
  __shared__ bool found;
  __shared__ unsigned foundPos;

  accscalar_t *smem = reinterpret_cast<accscalar_t *>(my_smem);

  accscalar_t accZero = static_cast<accscalar_t>(0);
  scalar_t zero = static_cast<scalar_t>(0);

  for (int64_t curDist = blockIdx.x;
       curDist < distributions; curDist += gridDim.x) {
    // Each block handles one distribution
    // First pass, find the total sum of the distribution
    accscalar_t sum = accZero;
    scalar_t val;
    for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
      val = dist[curDist * stride_dist + cat * stride_categories];
      CUDA_KERNEL_ASSERT(!at::_isnan(val));
      CUDA_KERNEL_ASSERT(!_isinf(val));
      CUDA_KERNEL_ASSERT(!(val < zero));
      sum = sum + static_cast<accscalar_t>(val);
    }

    // threadIdx.x == 0 has the sum value from this
    sum = cuda_utils::BlockReduceSum(sum, smem);

    // Broadcast sum and sample value
    if (threadIdx.x == 0) {
      // Make sure the sum of our distribution didn't overflow
      CUDA_KERNEL_ASSERT(!_isinf(val));
      CUDA_KERNEL_ASSERT(sum > accZero);

      foundPos = 0;
      smem[0] = sum;
      smem[1] = sampled[curDist];
    }
    __syncthreads();

    sum = smem[0];
    scalar_t sample = static_cast<scalar_t>(smem[1]);
    __syncthreads();

    if (sum == accZero) {
      // Choose the first element
      if (threadIdx.x == 0) {
        dest[curDist] = 0;
      }

      continue;
    }

    // Walk over the categories blockDim.x at a time; prevHighProb carries
    // the cumulative probability of all previous chunks.
    int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
    accscalar_t prevHighProb = accZero;
    found = false;

    for (int chunk = 0; chunk < chunks && !found; ++chunk) {
      // All threads in bounds load a value
      int cat = chunk * blockDim.x + threadIdx.x;

      accscalar_t dist_val = cat < categories ?
                             static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
                             accZero;

      smem[threadIdx.x] = dist_val;
      __syncthreads();

      // Perform an inclusive prefix sum of the shared memory contents
      for (int offset = 1; offset < blockDim.x; offset *= 2) {
        accscalar_t val = accZero;

        if (threadIdx.x >= offset) {
          val = smem[threadIdx.x - offset] + smem[threadIdx.x];
        }

        __syncthreads();
        if (threadIdx.x >= offset) {
          smem[threadIdx.x] = val;
        }
        __syncthreads();
      }

      // Each thread will check to see if the sample falls in its
      // bucket
      scalar_t curBucket =
          static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
      scalar_t prevBucket = static_cast<scalar_t>(
          threadIdx.x == 0 ? prevHighProb
                          : smem[threadIdx.x - 1] + prevHighProb);
      bool inBucket =
          (cat < categories) &&
          (!(sample >= curBucket) &&
          (sample >= prevBucket) &&
          (dist_val > zero));

      if (inBucket) {
        // We're done; we have the sample
        // Torch indices are 1-based
        // NOTE(review): the comment above looks stale — `cat` stored here is
        // a 0-based category index; confirm against callers before relying
        // on it.
        atomicMax(&foundPos, cat);
        found = true;
      }

      // Store the previous scan's high value for future use
      prevHighProb = prevHighProb + smem[blockDim.x - 1];

      __syncthreads();
    }

    if (threadIdx.x == 0) {
      if (found) {
        dest[curDist] = foundPos;
      } else {
        // This should address a rare bug where we don't select a valid index. This likely occurs when
        // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but
        // and our uniform sample is greater than this value. In this case we likely have unitialized memory
        // in dest[curDist]. So basically we will loop through the distribution and pick the largest index
        // where the distribution is non-zero. This is obviously terribly inefficient, but due to the
        // rarity in which this occurs, this should not be an issue.
        for (int cat = categories - 1; cat >= 0; --cat) {
          if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
            dest[curDist] = cat;
            break;
          }
        }
      }
    }
  }
}

// Entry point for torch.multinomial with replacement on CUDA.
// `self` is a 1-D or 2-D tensor of non-negative weights; `result` is
// resized to (numDist, n_sample) (flattened back to (n_sample,) for 1-D
// input). Two code paths: an allocation-free kernel for n_sample == 1 when
// shared memory permits, and a generic path that normalizes, prefix-sums,
// and binary-searches.
void multinomial_with_replacement_kernel_impl(
    Tensor& result,
    const Tensor& self,
    const int64_t n_sample,
    c10::optional<Generator> generator) {
  auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());

  int inputSize = self.dim();
  int64_t numDist =
      inputSize == 1 ? 1 : self.size(0);
  int numCategories =
      inputSize == 1 ? self.size(0) : self.size(1);

  // Restructure data for 2d
  auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;

  result.resize_({numDist, n_sample});

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto props = at::cuda::getCurrentDeviceProperties();
    CUDA_KERNEL_ASSERT(props != NULL);
    int numSM = props->multiProcessorCount;
    int maxThreads = props->maxThreadsPerBlock;
    int maxShared = props->sharedMemPerBlock;

    // One thread per category, rounded up to whole warps; the fast path
    // needs one accscalar_t of shared memory per thread.
    int requiredWarps = at::ceil_div(numCategories, C10_WARP_SIZE);
    int requiredThreads = std::min(maxThreads, requiredWarps * C10_WARP_SIZE);
    int requiredShared = requiredThreads * sizeof(accscalar_t);

    if (n_sample == 1 && maxShared >= requiredShared) {
      // Optimized allocation-free implementation
      // To exploit greater parallelism for the sampling, generate the
      // Uniform random samples in a separate kernel launch, into
      // temporarily allocated memory. The device RNG is thread-limited
      Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()), self_v.options().layout_opt(), self_v.options().device_opt(), self_v.options().pinned_memory_opt());
      at::native::uniform_(sampled, 0.0, 1.0, generator);

      dim3 block(requiredThreads);
      dim3 grid(std::min(static_cast<int>(numDist), numSM * 4));

      sampleMultinomialOnce<scalar_t, accscalar_t>
          <<<grid, block,
          requiredShared,
          at::cuda::getCurrentCUDAStream()>>>(
              result.data_ptr<int64_t>(),
                  numDist,
                  numCategories,
                  sampled.data_ptr<scalar_t>(),
                  self_v.data_ptr<scalar_t>(),
                  self_v.stride(0),
                  self_v.stride(1)
          );
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      // Generic, slow implementation with memory allocations

      // For sampling without replacement, we modify the distribution
      // for subsequent samples in this space
      Tensor origDist = native::empty_like(
          self_v,
          c10::nullopt /* dtype */,
          c10::nullopt /* layout */,
          c10::nullopt /* device */,
          c10::nullopt /* pin_memory */,
          LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      origDist.copy_(self_v);

      Tensor normDist = native::empty_like(
          self_v,
          c10::nullopt /* dtype */,
          c10::nullopt /* layout */,
          c10::nullopt /* device */,
          c10::nullopt /* pin_memory */,
          LEGACY_CONTIGUOUS_MEMORY_FORMAT);

      Tensor prefixSum = native::empty_like(
          self_v,
          c10::nullopt /* dtype */,
          c10::nullopt /* layout */,
          c10::nullopt /* device */,
          c10::nullopt /* pin_memory */,
          LEGACY_CONTIGUOUS_MEMORY_FORMAT);

      // Renorm along rows
      normDist.copy_(origDist);
      renormRows(normDist);

      // Prefix sum along rows
      at::cuda::cumsum_out(prefixSum, normDist, 1);

      PhiloxCudaState rng_engine_inputs;

      // Binary search is warp divergent (so effectively we're running
      // with just a single thread), but for better utilization,
      // we need each block to have at least 4 warps.
      dim3 block(128);

      // Each block will generate a sample from one
      // distribution concurrently.
      int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
      dim3 grid((n_sample-1)/block.x+1, grid_y);
      {
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(gen->mutex_);

        // each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
        // curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]),
        // offset is 4 times that.
        auto offset = ((numDist-1)/grid.y+1)*4;
        rng_engine_inputs = gen->philox_cuda_state(offset);
      }

      // Sample with replacement
      sampleMultinomialWithReplacement
          <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
          rng_engine_inputs,
              n_sample,
              result.data_ptr<int64_t>(),
              numDist, numCategories,
              prefixSum.data_ptr<scalar_t>(),
              normDist.data_ptr<scalar_t>());
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  });

  if (inputSize == 1) {
    result.resize_({n_sample});
  }
}
}

REGISTER_DISPATCH(
    multinomial_with_replacement_stub,
    &multinomial_with_replacement_kernel_impl);
}}
the_stack
#include "open3d/core/Tensor.h"
#include "open3d/core/nns/FixedRadiusIndex.h"
#include "open3d/core/nns/FixedRadiusSearchImpl.cuh"
#include "open3d/core/nns/NeighborSearchAllocator.h"
#include "open3d/core/nns/NeighborSearchCommon.h"

namespace open3d {
namespace core {
namespace nns {

// Builds the spatial hash table used for fixed-radius neighbor search.
// Uses the CUB-style two-pass protocol visible below: the first call with a
// null temp_ptr only computes the required scratch size (temp_size); a
// scratch tensor of that size is then allocated and the second, identical
// call does the actual work.
// Note: all work is issued on the default stream (0).
template <class T>
void BuildSpatialHashTableCUDA(const Tensor& points,
                               double radius,
                               const Tensor& points_row_splits,
                               const Tensor& hash_table_splits,
                               Tensor& hash_table_index,
                               Tensor& hash_table_cell_splits) {
    const cudaStream_t stream = 0;
    int texture_alignment = 512;

    void* temp_ptr = nullptr;
    size_t temp_size = 0;

    // First pass: temp_ptr == nullptr, so this only fills in temp_size.
    open3d::core::nns::impl::BuildSpatialHashTableCUDA(
            stream, temp_ptr, temp_size, texture_alignment,
            points.GetShape()[0], points.GetDataPtr<T>(), T(radius),
            points_row_splits.GetShape()[0],
            points_row_splits.GetDataPtr<int64_t>(),
            hash_table_splits.GetDataPtr<uint32_t>(),
            hash_table_cell_splits.GetShape()[0],
            hash_table_cell_splits.GetDataPtr<uint32_t>(),
            hash_table_index.GetDataPtr<uint32_t>());

    Device device = points.GetDevice();
    Tensor temp_tensor =
            Tensor::Empty({int64_t(temp_size)}, Dtype::UInt8, device);
    temp_ptr = temp_tensor.GetDataPtr();

    // Second pass: same arguments, now with real scratch memory.
    open3d::core::nns::impl::BuildSpatialHashTableCUDA(
            stream, temp_ptr, temp_size, texture_alignment,
            points.GetShape()[0], points.GetDataPtr<T>(), T(radius),
            points_row_splits.GetShape()[0],
            points_row_splits.GetDataPtr<int64_t>(),
            hash_table_splits.GetDataPtr<uint32_t>(),
            hash_table_cell_splits.GetShape()[0],
            hash_table_cell_splits.GetDataPtr<uint32_t>(),
            hash_table_index.GetDataPtr<uint32_t>());
}

// Fixed-radius neighbor search over a prebuilt spatial hash table.
// Outputs neighbors_index / neighbors_row_splits (and neighbors_distance
// when return_distances is set, presumably — the flag is forwarded to the
// impl layer; confirm there). When `sort` is true the per-query neighbor
// lists are additionally sorted by distance via SortPairs, again using the
// two-pass size-query/execute protocol.
template <class T>
void FixedRadiusSearchCUDA(const Tensor& points,
                           const Tensor& queries,
                           double radius,
                           const Tensor& points_row_splits,
                           const Tensor& queries_row_splits,
                           const Tensor& hash_table_splits,
                           const Tensor& hash_table_index,
                           const Tensor& hash_table_cell_splits,
                           const Metric metric,
                           const bool ignore_query_point,
                           const bool return_distances,
                           const bool sort,
                           Tensor& neighbors_index,
                           Tensor& neighbors_row_splits,
                           Tensor& neighbors_distance) {
    const cudaStream_t stream = 0;
    int texture_alignment = 512;

    Device device = points.GetDevice();
    Dtype dtype = points.GetDtype();

    NeighborSearchAllocator<T> output_allocator(device);
    void* temp_ptr = nullptr;
    size_t temp_size = 0;

    // First pass: determine required scratch size (temp_ptr is null).
    open3d::core::nns::impl::FixedRadiusSearchCUDA(
            stream, temp_ptr, temp_size, texture_alignment,
            neighbors_row_splits.GetDataPtr<int64_t>(), points.GetShape()[0],
            points.GetDataPtr<T>(), queries.GetShape()[0],
            queries.GetDataPtr<T>(), T(radius), points_row_splits.GetShape()[0],
            points_row_splits.GetDataPtr<int64_t>(),
            queries_row_splits.GetShape()[0],
            queries_row_splits.GetDataPtr<int64_t>(),
            hash_table_splits.GetDataPtr<uint32_t>(),
            hash_table_cell_splits.GetShape()[0],
            hash_table_cell_splits.GetDataPtr<uint32_t>(),
            hash_table_index.GetDataPtr<uint32_t>(), metric,
            ignore_query_point, return_distances, output_allocator);

    Tensor temp_tensor =
            Tensor::Empty({int64_t(temp_size)}, Dtype::UInt8, device);
    temp_ptr = temp_tensor.GetDataPtr();

    // Second pass: run the actual search.
    open3d::core::nns::impl::FixedRadiusSearchCUDA(
            stream, temp_ptr, temp_size, texture_alignment,
            neighbors_row_splits.GetDataPtr<int64_t>(), points.GetShape()[0],
            points.GetDataPtr<T>(), queries.GetShape()[0],
            queries.GetDataPtr<T>(), T(radius), points_row_splits.GetShape()[0],
            points_row_splits.GetDataPtr<int64_t>(),
            queries_row_splits.GetShape()[0],
            queries_row_splits.GetDataPtr<int64_t>(),
            hash_table_splits.GetDataPtr<uint32_t>(),
            hash_table_cell_splits.GetShape()[0],
            hash_table_cell_splits.GetDataPtr<uint32_t>(),
            hash_table_index.GetDataPtr<uint32_t>(), metric,
            ignore_query_point, return_distances, output_allocator);

    Tensor indices_unsorted = output_allocator.NeighborsIndex();
    Tensor distances_unsorted = output_allocator.NeighborsDistance();

    if (!sort) {
        neighbors_index = indices_unsorted;
        neighbors_distance = distances_unsorted;
    } else {
        // Sort indices & distances.
        temp_ptr = nullptr;
        temp_size = 0;

        int64_t num_indices = indices_unsorted.GetShape()[0];
        // neighbors_row_splits has one entry per query plus a leading 0,
        // hence segments = splits - 1.
        int64_t num_segments = neighbors_row_splits.GetShape()[0] - 1;
        Tensor indices_sorted =
                Tensor::Empty({num_indices}, Dtype::Int32, device);
        Tensor distances_sorted = Tensor::Empty({num_indices}, dtype, device);

        // Determine temp_size for sorting
        open3d::core::nns::impl::SortPairs(
                temp_ptr, temp_size, texture_alignment, num_indices,
                num_segments, neighbors_row_splits.GetDataPtr<int64_t>(),
                indices_unsorted.GetDataPtr<int32_t>(),
                distances_unsorted.GetDataPtr<T>(),
                indices_sorted.GetDataPtr<int32_t>(),
                distances_sorted.GetDataPtr<T>());

        temp_tensor = Tensor::Empty({int64_t(temp_size)}, Dtype::UInt8, device);
        temp_ptr = temp_tensor.GetDataPtr();

        // Actually run the sorting.
        open3d::core::nns::impl::SortPairs(
                temp_ptr, temp_size, texture_alignment, num_indices,
                num_segments, neighbors_row_splits.GetDataPtr<int64_t>(),
                indices_unsorted.GetDataPtr<int32_t>(),
                distances_unsorted.GetDataPtr<T>(),
                indices_sorted.GetDataPtr<int32_t>(),
                distances_sorted.GetDataPtr<T>());

        neighbors_index = indices_sorted;
        neighbors_distance = distances_sorted;
    }
}

// Hybrid search: fixed-radius search capped at max_knn neighbors per query.
// Unlike the functions above, this makes a single impl call (no scratch
// size-query pass); outputs come back through the allocator.
template <class T>
void HybridSearchCUDA(const Tensor& points,
                      const Tensor& queries,
                      double radius,
                      int max_knn,
                      const Tensor& points_row_splits,
                      const Tensor& queries_row_splits,
                      const Tensor& hash_table_splits,
                      const Tensor& hash_table_index,
                      const Tensor& hash_table_cell_splits,
                      const Metric metric,
                      Tensor& neighbors_index,
                      Tensor& neighbors_count,
                      Tensor& neighbors_distance) {
    const cudaStream_t stream = 0;

    Device device = points.GetDevice();

    NeighborSearchAllocator<T> output_allocator(device);

    open3d::core::nns::impl::HybridSearchCUDA(
            stream, points.GetShape()[0], points.GetDataPtr<T>(),
            queries.GetShape()[0], queries.GetDataPtr<T>(), T(radius), max_knn,
            points_row_splits.GetShape()[0],
            points_row_splits.GetDataPtr<int64_t>(),
            queries_row_splits.GetShape()[0],
            queries_row_splits.GetDataPtr<int64_t>(),
            hash_table_splits.GetDataPtr<uint32_t>(),
            hash_table_cell_splits.GetShape()[0],
            hash_table_cell_splits.GetDataPtr<uint32_t>(),
            hash_table_index.GetDataPtr<uint32_t>(), metric, output_allocator);

    neighbors_index = output_allocator.NeighborsIndex();
    neighbors_distance = output_allocator.NeighborsDistance();
    neighbors_count = output_allocator.NeighborsCount();
}

// Explicit instantiation helpers for float and double point types.
#define INSTANTIATE_BUILD(T)                                               \
    template void BuildSpatialHashTableCUDA<T>(                            \
            const Tensor& points, double radius,                           \
            const Tensor& points_row_splits, const Tensor& hash_table_splits, \
            Tensor& hash_table_index, Tensor& hash_table_cell_splits);

#define INSTANTIATE_RADIUS(T)                                              \
    template void FixedRadiusSearchCUDA<T>(                                \
            const Tensor& points, const Tensor& queries, double radius,    \
            const Tensor& points_row_splits, const Tensor& queries_row_splits, \
            const Tensor& hash_table_splits, const Tensor& hash_table_index, \
            const Tensor& hash_table_cell_splits, const Metric metric,     \
            const bool ignore_query_point, const bool return_distances,    \
            const bool sort, Tensor& neighbors_index,                      \
            Tensor& neighbors_row_splits, Tensor& neighbors_distance);

#define INSTANTIATE_HYBRID(T)                                              \
    template void HybridSearchCUDA<T>(                                     \
            const Tensor& points, const Tensor& queries, double radius,    \
            int max_knn, const Tensor& points_row_splits,                  \
            const Tensor& queries_row_splits, const Tensor& hash_table_splits, \
            const Tensor& hash_table_index,                                \
            const Tensor& hash_table_cell_splits, const Metric metric,     \
            Tensor& neighbors_index, Tensor& neighbors_count,              \
            Tensor& neighbors_distance);

INSTANTIATE_BUILD(float)
INSTANTIATE_BUILD(double)
INSTANTIATE_RADIUS(float)
INSTANTIATE_RADIUS(double)
INSTANTIATE_HYBRID(float)
INSTANTIATE_HYBRID(double)

}  // namespace nns
}  // namespace core
}  // namespace open3d
the_stack
// Deterministic per-gridcell scale factor used by the analytic verification
// scheme: the final value is always a small power of two (1<<result with
// result in [0,3] when RELAXED_TESTING is off).
// NOTE(review): the LCG-style mixing below is discarded by the plain
// `result = ix_g+3*iy_g+7*iz_g+2;` assignment that follows it — this matches
// the upstream testing scaffold, so it is preserved as-is; confirm against
// the reference implementation before "fixing".
__device__ int Quantities_scalefactor_space_acceldir(int ix_g, int iy_g, int iz_g)
{
  int result = 0;
#ifndef RELAXED_TESTING
  const int im = 134456;
  const int ia = 8121;
  const int ic = 28411;
  result = ( (result+(ix_g+2))*ia + ic ) % im;
  result = ( (result+(iy_g+2))*ia + ic ) % im;
  result = ( (result+(iz_g+2))*ia + ic ) % im;
  result = ( (result+(ix_g+3*iy_g+7*iz_g+2))*ia + ic ) % im;
  result = ix_g+3*iy_g+7*iz_g+2;
  result = result & ( (1<<2) - 1 );
#endif
  result = 1 << result;
  return result;
}

// Computes the analytically-known initial value for a face element as a
// product of per-index scale factors (angle, space, energy, unknown, octant).
__device__ P Quantities_init_face_acceldir(int ia, int ie, int iu, int scalefactor_space, int octant)
{
  /*--- Quantities_affinefunction_ inline ---*/
  return ( (P) (1 + ia) )
    /*--- Quantities_scalefactor_angle_ inline ---*/
    * ( (P) (1 << (ia & ( (1<<3) - 1))) )
    /*--- Quantities_scalefactor_space_ inline ---*/
    * ( (P) scalefactor_space)
    /*--- Quantities_scalefactor_energy_ inline ---*/
    * ( (P) (1 << ((( (ie) * 1366 + 150889) % 714025) & ( (1<<2) - 1))) )
    /*--- Quantities_scalefactor_unknown_ inline ---*/
    * ( (P) (1 << ((( (iu) * 741 + 60037) % 312500) & ( (1<<2) - 1))) )
    /*--- Quantities_scalefactor_octant_ ---*/
    * ( (P) 1 + octant);
}

// Combines the local state value with the three incoming faces (xy, xz, yz)
// using specially chosen flux weights, writes the result back to vs_local,
// and propagates the octant-scaled result to all three outgoing faces.
__device__ void Quantities_solve_acceldir(P* vs_local, Dimensions dims, P* facexy, P* facexz, P* faceyz,
                                          int ix, int iy, int iz, int ix_g, int iy_g, int iz_g,
                                          int ie, int ia, int octant, int octant_in_block, int noctant_per_block)
{
  const int dir_x = Dir_x( octant );
  const int dir_y = Dir_y( octant );
  const int dir_z = Dir_z( octant );

  int iu = 0;

  /*---Average the face values and accumulate---*/

  /*---The state value and incoming face values are first adjusted to
       normalized values by removing the spatial scaling.
       They are then combined using a weighted average chosen in a special
       way to give just the expected result.
       Finally, spatial scaling is applied to the result which is then
       stored.
  ---*/

  /*--- Quantities_scalefactor_octant_ inline ---*/
  const P scalefactor_octant = 1 + octant;
  const P scalefactor_octant_r = ((P)1) / scalefactor_octant;

  /*---Quantities_scalefactor_space_ inline ---*/
  const P scalefactor_space = (P)Quantities_scalefactor_space_acceldir(ix_g, iy_g, iz_g);
  const P scalefactor_space_r = ((P)1) / scalefactor_space;
  // Reciprocal scale factors of the upwind neighbor cells in each axis.
  const P scalefactor_space_x_r = ((P)1) /
    Quantities_scalefactor_space_acceldir( ix_g - dir_x, iy_g, iz_g );
  const P scalefactor_space_y_r = ((P)1) /
    Quantities_scalefactor_space_acceldir( ix_g, iy_g - dir_y, iz_g );
  const P scalefactor_space_z_r = ((P)1) /
    Quantities_scalefactor_space_acceldir( ix_g, iy_g, iz_g - dir_z );

#ifdef USE_OPENMP_TARGET
  // no equivalent
#elif defined(USE_ACC)
#pragma acc loop seq
#endif
  for( iu=0; iu<NU; ++iu )
  {
    int vs_local_index = ia + dims.na * (
                         iu + NU  * (
                         ie + dims.ne * (
                         ix + dims.ncell_x * (
                         iy + dims.ncell_y * (
                         octant + NOCTANT * (
                         0))))));

    const P result = ( vs_local[vs_local_index] * scalefactor_space_r + (
          /*--- ref_facexy inline ---*/
          facexy[ia + dims.na * (
                 iu + NU  * (
                 ie + dims.ne * (
                 ix + dims.ncell_x * (
                 iy + dims.ncell_y * (
                 octant + NOCTANT * (
                 0 )))))) ]
          /*--- Quantities_xfluxweight_ inline ---*/
          * (P) ( 1 / (P) 2 )
          * scalefactor_space_z_r
          /*--- ref_facexz inline ---*/
        + facexz[ia + dims.na * (
                 iu + NU  * (
                 ie + dims.ne * (
                 ix + dims.ncell_x * (
                 iz + dims.ncell_z * (
                 octant + NOCTANT * (
                 0 )))))) ]
          /*--- Quantities_yfluxweight_ inline ---*/
          * (P) ( 1 / (P) 4 )
          * scalefactor_space_y_r
          /*--- ref_faceyz inline ---*/
        + faceyz[ia + dims.na * (
                 iu + NU  * (
                 ie + dims.ne * (
                 iy + dims.ncell_y * (
                 iz + dims.ncell_z * (
                 octant + NOCTANT * (
                 0 )))))) ]
          /*--- Quantities_zfluxweight_ inline ---*/
          * (P) ( 1 / (P) 4 - 1 / (P) (1 << ( ia & ( (1<<3) - 1 ) )) )
          * scalefactor_space_x_r
      ) * scalefactor_octant_r ) * scalefactor_space;

    vs_local[vs_local_index] = result;

    const P result_scaled = result * scalefactor_octant;
    /*--- ref_facexy inline ---*/
    facexy[ia + dims.na * (
           iu + NU  * (
           ie + dims.ne * (
           ix + dims.ncell_x * (
           iy + dims.ncell_y * (
           octant + NOCTANT * (
           0 )))))) ] = result_scaled;

    /*--- ref_facexz inline ---*/
    facexz[ia + dims.na * (
           iu + NU  * (
           ie + dims.ne * (
           ix + dims.ncell_x * (
           iz + dims.ncell_z * (
           octant + NOCTANT * (
           0 )))))) ] = result_scaled;

    /*--- ref_faceyz inline ---*/
    faceyz[ia + dims.na * (
           iu + NU  * (
           ie + dims.ne * (
           iy + dims.ncell_y * (
           iz + dims.ncell_z * (
           octant + NOCTANT * (
           0 )))))) ] = result_scaled;

  } /*---for---*/
}

// Full per-gridcell sweep step: moments->angles transform (a_from_m),
// the face/state solve above, then angles->moments transform (m_from_a)
// accumulated into vo with atomicAdd (multiple octants write the same cell).
// The z index is derived from the wavefront number and bounds-checked here.
__device__ void Sweeper_sweep_cell_acceldir( const Dimensions &dims,
                                             int wavefront,
                                             int octant,
                                             int ix, int iy,
                                             int ix_g, int iy_g, int iz_g,
                                             int dir_x, int dir_y, int dir_z,
                                             P* __restrict__ facexy,
                                             P* __restrict__ facexz,
                                             P* __restrict__ faceyz,
                                             const P* __restrict__ a_from_m,
                                             const P* __restrict__ m_from_a,
                                             const P* vi,
                                             P* vo,
                                             P* vs_local,
                                             int octant_in_block,
                                             int noctant_per_block,
                                             int ie )
{
  /*---Declarations---*/
  // int iz = 0;
  // int ie = 0;
  int im = 0;
  int ia = 0;
  int iu = 0;
  /* int octant = 0; */

  /*--- Dimensions ---*/
  int dims_ncell_x = dims.ncell_x;
  int dims_ncell_y = dims.ncell_y;
  int dims_ncell_z = dims.ncell_z;
  int dims_ne = dims.ne;
  int dims_na = dims.na;
  int dims_nm = dims.nm;

  /*--- Solve for Z dimension, and check bounds.
    The sum of the dimensions should equal the wavefront number.
    If z < 0 or z > wavefront number, we are out of bounds.
    Z also shouldn't exceed the spacial bound for the z dimension.

    The calculation is adjusted for the direction of each axis
    in a given octant.
  ---*/
  const int ixwav = dir_x==DIR_UP ? ix : (dims_ncell_x-1) - ix;
  const int iywav = dir_y==DIR_UP ? iy : (dims_ncell_y-1) - iy;
  const int izwav = wavefront - ixwav - iywav;
  const int iz = dir_z==DIR_UP ? izwav : (dims_ncell_z-1) - izwav;

  // int ixwav, iywav, izwav;
  // if (dir_x==DIR_UP) { ixwav = ix; } else { ixwav = (dims_ncell_x-1) - ix; }
  // if (dir_y==DIR_UP) { iywav = iy; } else { iywav = (dims_ncell_y-1) - iy; }

  // if (dir_z==DIR_UP) {
  //   iz = wavefront - (ixwav + iywav); }
  // else {
  //   iz = (dims_ncell_z-1) - (wavefront - (ixwav + iywav));
  // }

  /*--- Bounds check ---*/
  if ((iz >= 0 && iz < dims_ncell_z) )// &&
    /* ((dir_z==DIR_UP && iz <= wavefront) || */
    /*  (dir_z==DIR_DN && (dims_ncell_z-1-iz) <= wavefront))) */
  {
    /*---Loop over energy groups---*/
    // for( ie=0; ie<dims_ne; ++ie )
    {

      /*--------------------*/
      /*---Transform state vector from moments to angles---*/
      /*--------------------*/

      /*---This loads values from the input state vector,
        does the small dense matrix-vector product,
        and stores the result in a relatively small local
        array that is hopefully small enough to fit into
        processor cache.
      ---*/

      for( iu=0; iu<NU; ++iu )
      for( ia=0; ia<dims_na; ++ia )
      {
        // reset reduction
        P result = (P)0;
        for( im=0; im < dims_nm; ++im )
        {
          /*--- const_ref_a_from_m inline ---*/
          result += a_from_m[ ia     + dims_na * (
                              im     +      NM * (
                              octant + NOCTANT * (
                              0 ))) ] *

            /*--- const_ref_state inline ---*/
            vi[im + dims.nm      * (
               iu + NU           * (
               ix + dims_ncell_x * (
               iy + dims_ncell_y * (
               ie + dims_ne      * (
               iz + dims_ncell_z * ( /*---NOTE: This axis MUST be slowest-varying---*/
               0 ))))))];
        }

        /*--- ref_vslocal inline ---*/
        vs_local[ ia + dims.na * (
                  iu + NU      * (
                  ie + dims_ne * (
                  ix + dims_ncell_x * (
                  iy + dims_ncell_y * (
                  octant + NOCTANT * (
                  0)))))) ] = result;
      }
    }

    /*--------------------*/
    /*---Perform solve---*/
    /*--------------------*/
    // /*---Loop over energy groups---*/
    for( ia=0; ia<dims_na; ++ia )
    {
      Quantities_solve_acceldir(vs_local, dims, facexy, facexz, faceyz,
                                ix, iy, iz, ix_g, iy_g, iz_g,
                                ie, ia, octant, octant_in_block, noctant_per_block);
    }

    /*--------------------*/
    /*---Transform state vector from angles to moments---*/
    /*--------------------*/

    /*---Perform small dense matrix-vector products and store
      the result in the output state vector.
    ---*/

    /*---Loop over energy groups---*/
    for( iu=0; iu<NU; ++iu )
    for( im=0; im<dims_nm; ++im )
    {
      P result = (P)0;
      for( ia=0; ia<dims_na; ++ia )
      {
        /*--- const_ref_m_from_a ---*/
        result += m_from_a[ im     +      NM * (
                            ia     + dims_na * (
                            octant + NOCTANT * (
                            0 ))) ] *

          /*--- const_ref_vslocal ---*/
          vs_local[ ia + dims_na * (
                    iu + NU      * (
                    ie + dims_ne * (
                    ix + dims_ncell_x * (
                    iy + dims_ncell_y * (
                    octant + NOCTANT * (
                    0 )))))) ];
      }

      /*--- ref_state inline ---*/
      atomicAdd( &vo[im + dims.nm      * (
                     iu + NU           * (
                     ix + dims_ncell_x * (
                     iy + dims_ncell_y * (
                     ie + dims_ne      * (
                     iz + dims_ncell_z * ( /*---NOTE: This axis MUST be slowest-varying---*/
                     0 ))))))] , result);
    }

    // } /*---ie---*/

  } /*--- iz ---*/
}

// Initializes the XY boundary faces at iz = -1 (upward z octants) or
// iz = ncell_z (downward) with their analytically-known values.
// Thread layout: (x, y, octant) across the 3-D grid; each thread loops over
// energy groups, unknowns, and angles.
__global__ void init_facexy(
  const int ix_base, const int iy_base,
  const int dims_b_ne, const int dims_b_na,
  const int dims_b_ncell_x, const int dims_b_ncell_y, const int dims_b_ncell_z,
  const int dims_ncell_z,
  P* facexy)
{
  int ix = blockDim.x * blockIdx.x + threadIdx.x;
  int iy = blockDim.y * blockIdx.y + threadIdx.y;
  int octant = blockDim.z * blockIdx.z + threadIdx.z;
  if (ix >= dims_b_ncell_x || iy >= dims_b_ncell_y || octant >= NOCTANT) return;

  for(int ie=0; ie<dims_b_ne; ++ie )
  for(int iu=0; iu<NU; ++iu )
  for(int ia=0; ia<dims_b_na; ++ia )
  {
    const int dir_z = Dir_z( octant );
    const int iz = dir_z == DIR_UP ? -1 : dims_b_ncell_z;

    const int ix_g = ix + ix_base; // dims_b_ncell_x * proc_x;
    const int iy_g = iy + iy_base; // dims_b_ncell_y * proc_y;
    const int iz_g = iz + (dir_z == DIR_UP ? 0 : dims_ncell_z - dims_b_ncell_z);
    //const int iz_g = iz + stepinfoall.stepinfo[octant].block_z * dims_b_ncell_z;

    /*--- Quantities_scalefactor_space_ inline ---*/
    const int scalefactor_space = Quantities_scalefactor_space_acceldir(ix_g, iy_g, iz_g);

    /*--- ref_facexy inline ---*/
    facexy[FACEXY_ADDR(dims_b_ncell_x, dims_b_ncell_y)]
      /*--- Quantities_init_face routine ---*/
      = Quantities_init_face_acceldir(ia, ie, iu, scalefactor_space, octant);

    //printf("kernel facexy: %d %d %d %d %d %f\n",
    //ia, ie, iu, scalefactor_space, octant,
    //Quantities_init_face_acceldir(ia, ie, iu, scalefactor_space, octant));
  } /*---for---*/
}

// Initializes the XZ boundary faces at iy = -1 / ncell_y, but only on
// processes at the y-boundary of the domain (proc_y_min/proc_y_max flags).
// Thread layout: (x, z, octant).
__global__ void init_facexz(
  const int ix_base, const int iy_base,
  const int dims_b_ne, const int dims_b_na,
  const int dims_b_ncell_x, const int dims_b_ncell_y, const int dims_b_ncell_z,
  const int proc_y_min, const int proc_y_max,
  StepInfoAll stepinfoall,
  P* facexz )
{
  int ix = blockDim.x * blockIdx.x + threadIdx.x;
  int iz = blockDim.y * blockIdx.y + threadIdx.y;
  int octant = blockDim.z * blockIdx.z + threadIdx.z;
  if (ix >= dims_b_ncell_x || iz >= dims_b_ncell_z || octant >= NOCTANT) return;

  for(int ie=0; ie<dims_b_ne; ++ie )
  for(int iu=0; iu<NU; ++iu )
  for(int ia=0; ia<dims_b_na; ++ia )
  {
    const int dir_y = Dir_y( octant );
    const int iy = dir_y == DIR_UP ? -1 : dims_b_ncell_y;

    const int ix_g = ix + ix_base; // dims_b_ncell_x * proc_x;
    const int iy_g = iy + iy_base; // dims_b_ncell_y * proc_y;
    const int iz_g = iz + stepinfoall.stepinfo[octant].block_z * dims_b_ncell_z;

    if ((dir_y == DIR_UP && proc_y_min) || (dir_y == DIR_DN && proc_y_max)) {

      /*--- Quantities_scalefactor_space_ inline ---*/
      const int scalefactor_space = Quantities_scalefactor_space_acceldir(ix_g, iy_g, iz_g);

      /*--- ref_facexz inline ---*/
      facexz[FACEXZ_ADDR(dims_b_ncell_x, dims_b_ncell_z)]
        /*--- Quantities_init_face routine ---*/
        = Quantities_init_face_acceldir(ia, ie, iu, scalefactor_space, octant);
    } /*---if---*/
  } /*---for---*/
}

// Initializes the YZ boundary faces at ix = -1 / ncell_x, only on processes
// at the x-boundary of the domain (proc_x_min/proc_x_max flags).
// Thread layout: (y, z, octant).
__global__ void init_faceyz(
  const int ix_base, const int iy_base,
  const int dims_b_ne, const int dims_b_na,
  const int dims_b_ncell_x, const int dims_b_ncell_y, const int dims_b_ncell_z,
  const int proc_x_min, const int proc_x_max,
  StepInfoAll stepinfoall,
  P* faceyz)
{
  int iy = blockDim.x * blockIdx.x + threadIdx.x;
  int iz = blockDim.y * blockIdx.y + threadIdx.y;
  int octant = blockDim.z * blockIdx.z + threadIdx.z;
  if (iy >= dims_b_ncell_y || iz >= dims_b_ncell_z || octant >= NOCTANT) return;

  for(int ie=0; ie<dims_b_ne; ++ie )
  for(int iu=0; iu<NU; ++iu )
  for(int ia=0; ia<dims_b_na; ++ia )
  {
    const int dir_x = Dir_x( octant );
    const int ix = dir_x == DIR_UP ? -1 : dims_b_ncell_x;

    const int ix_g = ix + ix_base; // dims_b_ncell_x * proc_x;
    const int iy_g = iy + iy_base; // dims_b_ncell_y * proc_y;
    const int iz_g = iz + stepinfoall.stepinfo[octant].block_z * dims_b_ncell_z;

    if ((dir_x == DIR_UP && proc_x_min) || (dir_x == DIR_DN && proc_x_max)) {

      /*--- Quantities_scalefactor_space_ inline ---*/
      const int scalefactor_space = Quantities_scalefactor_space_acceldir(ix_g, iy_g, iz_g);

      /*--- ref_faceyz inline ---*/
      faceyz[FACEYZ_ADDR(dims_b_ncell_y, dims_b_ncell_z)]
        /*--- Quantities_init_face routine ---*/
        = Quantities_init_face_acceldir(ia, ie, iu, scalefactor_space, octant);
    } /*---if---*/
  } /*---for---*/
}

// Driver kernel for the KBA-style wavefront sweep. Thread layout:
// x = octant, y = energy group. Each thread serially walks all wavefronts
// and all (x, y) cells of its block; the diagonal-plane z index and the
// bounds check happen inside Sweeper_sweep_cell_acceldir, so inactive
// (out-of-plane) cells are skipped there.
__global__ void wavefronts(
  const int num_wavefronts,
  const int ix_base, const int iy_base,
  const int v_b_size,
  const int noctant_per_block,
  const Dimensions dims_b,
  StepInfoAll stepinfoall,
  P* facexy,
  P* facexz,
  P* faceyz,
  P* a_from_m,
  P* m_from_a,
  P* vi,
  P* vo,
  P* vs_local)
{
  int octant = blockDim.x * blockIdx.x + threadIdx.x;
  int ie = blockDim.y * blockIdx.y + threadIdx.y;
  if (ie >= dims_b.ne || octant >= NOCTANT) return;

  const int dims_b_ncell_x = dims_b.ncell_x;
  const int dims_b_ncell_y = dims_b.ncell_y;
  const int dims_b_ncell_z = dims_b.ncell_z;

  /*--- Loop over wavefronts ---*/
  for (int wavefront = 0; wavefront < num_wavefronts; wavefront++)
  {
    for( int iywav=0; iywav<dims_b_ncell_y; ++iywav )
    for( int ixwav=0; ixwav<dims_b_ncell_x; ++ixwav )
    {
      if (stepinfoall.stepinfo[octant].is_active) {

        /*---Decode octant directions from octant number---*/
        const int dir_x = Dir_x( octant );
        const int dir_y = Dir_y( octant );
        const int dir_z = Dir_z( octant );

        const int octant_in_block = octant;

        // Mirror the in-wavefront coordinates according to sweep direction.
        const int ix = dir_x==DIR_UP ? ixwav : dims_b_ncell_x - 1 - ixwav;
        const int iy = dir_y==DIR_UP ? iywav : dims_b_ncell_y - 1 - iywav;
        const int izwav = wavefront - ixwav - iywav;
        const int iz = dir_z==DIR_UP ? izwav : (dims_b_ncell_z-1) - izwav;

        const int ix_g = ix + ix_base; // dims_b_ncell_x * proc_x;
        const int iy_g = iy + iy_base; // dims_b_ncell_y * proc_y;
        const int iz_g = iz + stepinfoall.stepinfo[octant].block_z * dims_b_ncell_z;

        // Offset into the state vectors for this octant's z block.
        const int v_offset = stepinfoall.stepinfo[octant].block_z * v_b_size;

        /*--- In-gridcell computations ---*/
        Sweeper_sweep_cell_acceldir( dims_b, wavefront, octant, ix, iy,
                                     ix_g, iy_g, iz_g,
                                     dir_x, dir_y, dir_z,
                                     facexy, facexz, faceyz,
                                     a_from_m, m_from_a,
                                     &(vi[v_offset]), &(vo[v_offset]), vs_local,
                                     octant_in_block, noctant_per_block, ie );
      } /*---if---*/
    } /*---octant/ix/iy---*/
  } /*--- wavefront ---*/
}
the_stack
#include <assert.h> #include <pthread.h> #include <stdint.h> #include <stdio.h> /* every tool needs to include this once */ #include "nvbit_tool.h" /* nvbit interface file */ #include "nvbit.h" /* provide some __device__ functions */ #include "utils/utils.h" // Used for tracking pointers for checkpointing #include <unordered_map> #include <algorithm> #include <tuple> #include <string> std::unordered_map<void*, std::tuple<int, size_t>> tracking_map; // We only want the call-back for an allocation // This contains the valid device pointer int callback_tracker = 0; int free_count = 0; int alloc_count = 0; int snapshot_number = 0; // Vector of kernel IDs to snapshot //const std::vector<int> skip = {0, 1, 2, 3, 4, 5, 6, 7}; /* kernel id counter, maintained in system memory */ uint32_t kernel_id = 0; /* total instruction counter, maintained in system memory, incremented by * "counter" every time a kernel completes */ uint64_t tot_app_instrs = 0; /* kernel instruction counter, updated by the GPU */ __managed__ uint64_t counter = 0; /* global control variables for this tool */ uint32_t instr_begin_interval = 0; uint32_t instr_end_interval = UINT32_MAX; uint32_t ker_begin_interval = 0; uint32_t ker_end_interval = UINT32_MAX; int verbose = 0; int count_warp_level = 1; int exclude_pred_off = 0; /* a pthread mutex, used to prevent multiple kernels to run concurrently and * therefore to "corrupt" the counter variable */ pthread_mutex_t mutex; /* instrumentation function that we want to inject, please note the use of * 1. "extern "C" __device__ __noinline__" to prevent code elimination by the * compiler. * 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function * we want to inject. 
This name must match exactly the function name */ extern "C" __device__ __noinline__ void count_instrs(int predicate, int count_warp_level) { /* all the active threads will compute the active mask */ const int active_mask = __ballot(1); /* compute the predicate mask */ const int predicate_mask = __ballot(predicate); /* each thread will get a lane id (get_lane_id is in utils/utils.h) */ const int laneid = get_laneid(); /* get the id of the first active thread */ const int first_laneid = __ffs(active_mask) - 1; /* count all the active thread */ const int num_threads = __popc(predicate_mask); /* only the first active thread will perform the atomic */ if (first_laneid == laneid) { if (count_warp_level) { /* num threads can be zero when accounting for predicates off */ if (num_threads > 0) atomicAdd((unsigned long long *)&counter, 1); } else { atomicAdd((unsigned long long *)&counter, num_threads); } } } NVBIT_EXPORT_FUNC(count_instrs); /* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We typically * do initializations in this call. In this case for instance we get some * environment variables values which we use as input arguments to the tool */ void nvbit_at_init() { /* just make sure all managed variables are allocated on GPU */ setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1); /* we get some environment variables that are going to be use to selectively * instrument (within a interval of kernel indexes and instructions). By * default we instrument everything. 
*/ GET_VAR_INT( instr_begin_interval, "INSTR_BEGIN", 0, "Beginning of the instruction interval where to apply instrumentation"); GET_VAR_INT( instr_end_interval, "INSTR_END", UINT32_MAX, "End of the instruction interval where to apply instrumentation"); GET_VAR_INT(ker_begin_interval, "KERNEL_BEGIN", 0, "Beginning of the kernel launch interval where to apply " "instrumentation"); GET_VAR_INT( ker_end_interval, "KERNEL_END", UINT32_MAX, "End of the kernel launch interval where to apply instrumentation"); GET_VAR_INT(count_warp_level, "COUNT_WARP_LEVEL", 1, "Count warp level or thread level instructions"); GET_VAR_INT(exclude_pred_off, "EXCLUDE_PRED_OFF", 0, "Exclude predicated off instruction from count"); GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool"); std::string pad(100, '-'); printf("%s\n", pad.c_str()); } /* nvbit_at_function_first_load() is executed every time a function is loaded * for the first time. Inside this call-back we typically get the vector of SASS * instructions composing the loaded CUfunction. We can iterate on this vector * and insert call to instrumentation functions before or after each one of * them. 
*/ void nvbit_at_function_first_load(CUcontext ctx, CUfunction func) { /* Get the vector of instruction composing the loaded CUFunction "func" */ const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, func); /* If verbose we print function name and number of" static" instructions */ if (verbose) { printf("inspecting %s - num instrs %ld\n", nvbit_get_func_name(ctx, func), instrs.size()); } /* We iterate on the vector of instruction */ for (auto i : instrs) { /* Check if the instruction falls in the interval where we want to * instrument */ if (i->getIdx() >= instr_begin_interval && i->getIdx() < instr_end_interval) { /* If verbose we print which instruction we are instrumenting (both * offset in the function and SASS string) */ if (verbose) { i->print(); } /* Insert a call to "count_instrs" before the instruction "i" */ nvbit_insert_call(i, "count_instrs", IPOINT_BEFORE); if (exclude_pred_off) { /* pass predicate value */ nvbit_add_call_arg_pred_val(i); } else { /* pass always true */ nvbit_add_call_arg_const_val32(i, 1); } /* add count warps option */ nvbit_add_call_arg_const_val32(i, count_warp_level); } } } /* This call-back is triggered every time a CUDA driver call is encountered. * Here we can look for a particular CUDA driver call by checking at the * call back ids which are defined in tools_cuda_api_meta.h. * This call back is triggered bith at entry and at exit of each CUDA driver * call, is_exit=0 is entry, is_exit=1 is exit. 
* */ void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid, const char *name, void *params, CUresult *pStatus) { // Add entry for memory on and allocate if(cbid == API_CUDA_cuMemAlloc_v2){ callback_tracker++; if(callback_tracker % 2 == 0){ // This is the call-back, so the next will be the original call cuMemAlloc_v2_params *p= (cuMemAlloc_v2_params*)params; printf("Found an allocation!\n"); tracking_map.insert({(void*)*p->dptr, std::make_tuple(alloc_count++, p->bytesize)}); printf("%p, %zu\n", *p->dptr, p->bytesize); } } // Delete entry for memory on a free if(cbid == API_CUDA_cuMemFree_v2){ callback_tracker++; if(callback_tracker % 2 == 0){ // This is the call-back, so the next will be the original call printf("Found a free!\n"); cuMemFree_v2_params *p= (cuMemFree_v2_params*)params; tracking_map.erase((void*)p->dptr); printf("%p\n", p->dptr); // Test-dump the map for(const auto pair : tracking_map){ printf("Address: %p, Number: %d, Size: %zu\n", pair.first, std::get<0>(pair.second), std::get<1>(pair.second)); } } } // Why do I even care about HtoD? if(cbid == API_CUDA_cuMemcpyHtoD_v2){ printf("Encountered a memcpy HtoD!\n"); cuMemcpyHtoD_v2_params *p= (cuMemcpyHtoD_v2_params*)params; printf("%p, %p, %zu\n", p->dstDevice, p->srcHost, p->ByteCount); } /* Identify all the possible CUDA launch events */ if (cbid == API_CUDA_cuLaunch || cbid == API_CUDA_cuLaunchKernel_ptsz || cbid == API_CUDA_cuLaunchGrid || cbid == API_CUDA_cuLaunchGridAsync || cbid == API_CUDA_cuLaunchKernel) { /* cast params to cuLaunch_params since if we are here we know these are * the right parameters type */ cuLaunch_params *p = (cuLaunch_params *)params; if (!is_exit) { /* if we are entering in a kernel launch: * 1. Lock the mutex to prevent multiple kernels to run concurrently * (overriding the counter) in case the user application does that * 2. Select if we want to run the instrumented or original * version of the kernel * 3. 
Reset the kernel instruction counter */ // Lock until kernel exit (enforces serialization) pthread_mutex_lock(&mutex); } else { /* if we are exiting a kernel launch: * 1. Wait until the kernel is completed using * cudaDeviceSynchronize() * 2. Get number of thread blocks in the kernel * 3. Print the thread instruction counters * 4. Release the lock*/ CUDA_SAFECALL(cudaDeviceSynchronize()); // Only snapshot selected kernels if(1){ // Dump a snapshot of the valid GPU memory state after each kernel for(const auto pair : tracking_map){ // Extract the data from the map size_t bytes = std::get<1>(pair.second); int alloc_number = std::get<0>(pair.second); void *tmp = pair.first; // Open a new file for this snapshot std::string name = std::to_string(kernel_id) + "_" + std::to_string(alloc_number) + ".txt"; FILE *f = fopen(name.c_str(), "w"); // Copy the data from the device std::vector<uint8_t> buffer; buffer.resize(bytes); cudaMemcpy(buffer.data(), tmp, bytes, cudaMemcpyDeviceToHost); // Write the data to a file for(auto i : buffer){ fprintf(f, "%hhu ", i); } } } // Update the kernel ID kernel_id++; // Allow the next call to proceed pthread_mutex_unlock(&mutex); } } }
the_stack
#include <chrono> #include <iostream> #include <unistd.h> #include <vector> __device__ half float_to_sto_half_direct(float w) { curandState_t state; curand_init((unsigned long long)(w * 100), 0, 0, &state); half up = __float2half_ru(w); half down = __float2half_rd(w); const float up_f32 = __half2float(up); const float down_f32 = __half2float(down); // 1 - (w - w_down) / (w_up - w_down) = (w_up - w) / (w_up - w_down) = n / m const float m = (up_f32 - down_f32); const float rand = curand_uniform(&state); if (__float_as_uint(m) == 0) { return up; } const float n = (up_f32 - w); return rand > n / m ? up : down; } __device__ float two_to_e(float X) { const float Y = 16777216 * X; // 2^24 const float U = ((Y + X) - Y) * 0.5; return U == 0 ? X : U; } __device__ half float_to_sto_half_bitcarry(float w) { curandState_t state; curand_init((unsigned long long)(w * 100), 0, 0, &state); float rand = curand_uniform(&state); float rand_match_w = two_to_e(w) * rand * 0.0009765625; // 2^(-10) float Z = w + rand_match_w; return __float2half_rz(Z); } __device__ half float_to_sto_half_shortrand(float w, uint8_t rand) { const unsigned w_int = __float_as_uint(w); const unsigned w_new = w_int + (rand << 5); return __float2half_rz(__uint_as_float(w_new)); } __device__ half float_to_sto_half_assemblefloat(float w, uint8_t rand) { const unsigned w_int = __float_as_uint(w); const unsigned assmebles = (w_int & 0xff800000) | (rand << 5); const unsigned subtract = (w_int & 0xff800000); const float assmeble_float = __uint_as_float(assmebles) - __uint_as_float(subtract); return __float2half_rz(w + assmeble_float); } __global__ void convert_float_to_half_direct(half* dst, float* src, int size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { dst[idx] = float_to_sto_half_direct(src[idx]); } } __global__ void convert_float_to_half_bitcarry(half* dst, float* src, int size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { dst[idx] = 
float_to_sto_half_bitcarry(src[idx]); } } __global__ void convert_float_to_half_shortrand(half* dst, float* src, uint8_t* r, int size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { dst[idx] = float_to_sto_half_shortrand(src[idx], r[idx]); } } __global__ void convert_float_to_half_assemblefloat( half* dst, float* src, uint8_t* r, int size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { dst[idx] = float_to_sto_half_assemblefloat(src[idx], r[idx]); } } void gen_data(float* d_f32_array, int test_size) { curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // Random seed curandGenerateUniform(gen, d_f32_array, test_size); curandDestroyGenerator(gen); cudaDeviceSynchronize(); } // generate 64bit random number and then copy back to 8bit memory void gen_8bit_random(uint8_t* d_random_number, int test_size) { curandGenerator_t gen; unsigned* d_random_number_f32; cudaMalloc( &d_random_number_f32, (test_size / sizeof(unsigned) + 1) * sizeof(unsigned)); curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); curandSetPseudoRandomGeneratorSeed(gen, 5678ULL); // Random seed curandGenerate(gen, d_random_number_f32, (test_size / sizeof(unsigned) + 1)); cudaMemcpy( d_random_number, d_random_number_f32, test_size * sizeof(uint8_t), cudaMemcpyDeviceToDevice); curandDestroyGenerator(gen); cudaFree(d_random_number_f32); } __global__ void flush_gpu(char* d_flush, char* d_flush2, bool do_write) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const char val = d_flush[idx]; if (do_write * val) { d_flush2[idx] = val; } } void flush_cache( std::vector<char> flush, char* d_flush, char* d_flush2, int cache_size, bool do_write = false) { cudaMemcpy(d_flush, flush.data(), cache_size, cudaMemcpyHostToDevice); const unsigned num_blocks = cache_size / 512; flush_gpu<<<num_blocks, 512>>>(d_flush, d_flush2, do_write); cudaDeviceSynchronize(); } int main(int 
argc, char* argv[]) {
    // Host-side result buffers (copied back only in verbose mode).
    std::vector<float> f32_array;
    std::vector<half> f16_direct_array;
    std::vector<half> f16_bitcarry_array;
    std::vector<half> f16_shortrand_array;
    std::vector<half> f16_assemblefloat_array;
    float* d_f32_array;
    half* d_f16_direct_array;
    half* d_f16_bitcarry_array;
    half* d_f16_shortrand_array;
    half* d_f16_assemblefloat_array;
    uint8_t* d_random_number;
    std::vector<char> flush;
    char* d_flush;
    char* d_flush2;
    int test_size = 10;
    bool verbose = false;
    int opt;
    // -n <count> sets the element count; -v enables the per-element dump.
    while ((opt = getopt(argc, argv, "n:v")) != -1) {
        switch (opt) {
            case 'n':
                test_size = atoi(optarg);
                break;
            case 'v':
                verbose = true;
                break;
        }
    }
    std::cout << "Start stochastic algorithm tests with test_size = " << test_size
              << std::endl;
    constexpr int cache_size = 40 * 1024 * 1024; // A100 40MB L2 cache
    /* fix: these were reserve() calls; the verbose path below cudaMemcpy's
     * into .data() and reads with operator[], which is undefined behavior on
     * a size-0 vector — resize() gives the vectors real elements. */
    f32_array.resize(test_size);
    f16_direct_array.resize(test_size);
    f16_bitcarry_array.resize(test_size);
    f16_shortrand_array.resize(test_size);
    f16_assemblefloat_array.resize(test_size);
    cudaMalloc(&d_f32_array, test_size * sizeof(float));
    cudaMalloc(&d_f16_direct_array, test_size * sizeof(half));
    cudaMalloc(&d_f16_bitcarry_array, test_size * sizeof(half));
    cudaMalloc(&d_f16_shortrand_array, test_size * sizeof(half));
    cudaMalloc(&d_f16_assemblefloat_array, test_size * sizeof(half));
    cudaMalloc(&d_random_number, test_size * sizeof(uint8_t));
    flush.assign(cache_size, 255);
    cudaMalloc(&d_flush, cache_size * sizeof(char));
    cudaMalloc(&d_flush2, cache_size * sizeof(char));
    gen_data(d_f32_array, test_size);
    gen_8bit_random(d_random_number, test_size);
    constexpr int block_size = 128;
    const int num_blocks = (test_size + block_size - 1) / block_size;

    // --- direct variant: flush L2, time kernel + sync, check errors ---
    flush_cache(flush, d_flush, d_flush2, cache_size);
    std::cout << "Starting algorithm direct..." << std::endl;
    auto start = std::chrono::high_resolution_clock::now();
    convert_float_to_half_direct<<<num_blocks, block_size>>>(
        d_f16_direct_array, d_f32_array, test_size);
    cudaDeviceSynchronize();
    auto end = std::chrono::high_resolution_clock::now();
    cudaError_t e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
        exit(-1);
    }
    std::chrono::duration<double> time = end - start;
    std::cout << "Direct stochastic algorithm runs: " << time.count() << " sec "
              << std::endl;

    // --- bitcarry variant ---
    flush_cache(flush, d_flush, d_flush2, cache_size);
    std::cout << "Starting algorithm bitcarry..." << std::endl;
    start = std::chrono::high_resolution_clock::now();
    convert_float_to_half_bitcarry<<<num_blocks, block_size>>>(
        d_f16_bitcarry_array, d_f32_array, test_size);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
        exit(-1);
    }
    time = end - start;
    std::cout << "Bitcarry stochastic algorithm runs: " << time.count() << " sec"
              << std::endl;

    // --- shortrand variant (uses pre-generated 8-bit randoms) ---
    flush_cache(flush, d_flush, d_flush2, cache_size);
    std::cout << "Starting algorithm shortrand..." << std::endl;
    start = std::chrono::high_resolution_clock::now();
    convert_float_to_half_shortrand<<<num_blocks, block_size>>>(
        d_f16_shortrand_array, d_f32_array, d_random_number, test_size);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
        exit(-1);
    }
    time = end - start;
    std::cout << "Shortrand stochastic algorithm runs: " << time.count() << " sec"
              << std::endl;

    // --- assemblefloat variant (uses pre-generated 8-bit randoms) ---
    flush_cache(flush, d_flush, d_flush2, cache_size);
    std::cout << "Starting algorithm assemblefloat..." << std::endl;
    start = std::chrono::high_resolution_clock::now();
    convert_float_to_half_assemblefloat<<<num_blocks, block_size>>>(
        d_f16_assemblefloat_array, d_f32_array, d_random_number, test_size);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
        exit(-1);
    }
    time = end - start;
    std::cout << "Assemblefloat stochastic algorithm runs: " << time.count()
              << " sec" << std::endl;

    if (verbose) {
        // Copy all results back and print each input with its four roundings.
        cudaMemcpy(
            f32_array.data(),
            d_f32_array,
            test_size * sizeof(float),
            cudaMemcpyDeviceToHost);
        cudaMemcpy(
            f16_direct_array.data(),
            d_f16_direct_array,
            test_size * sizeof(half),
            cudaMemcpyDeviceToHost);
        cudaMemcpy(
            f16_bitcarry_array.data(),
            d_f16_bitcarry_array,
            test_size * sizeof(half),
            cudaMemcpyDeviceToHost);
        cudaMemcpy(
            f16_shortrand_array.data(),
            d_f16_shortrand_array,
            test_size * sizeof(half),
            cudaMemcpyDeviceToHost);
        cudaMemcpy(
            f16_assemblefloat_array.data(),
            d_f16_assemblefloat_array,
            test_size * sizeof(half),
            cudaMemcpyDeviceToHost);
        for (int i = 0; i < test_size; i++) {
            std::cout << std::hexfloat << f32_array[i] << ":\t(up:" << std::hexfloat
                      << __half2float(__float2half_ru(f32_array[i]))
                      << "\tdown:" << std::hexfloat
                      << __half2float(__float2half_rd(f32_array[i]))
                      << ") \tdirect: " << std::hexfloat
                      << __half2float(f16_direct_array[i])
                      << "\tbitcarry: " << std::hexfloat
                      << __half2float(f16_bitcarry_array[i])
                      << " \tshortrand: " << std::hexfloat
                      << __half2float(f16_shortrand_array[i])
                      << " \tassemblefloat: " << std::hexfloat
                      << __half2float(f16_assemblefloat_array[i]) << std::endl;
        }
    }

    cudaFree(d_f32_array);
    cudaFree(d_f16_direct_array);
    cudaFree(d_f16_bitcarry_array);
    cudaFree(d_f16_shortrand_array);
    cudaFree(d_f16_assemblefloat_array);
    /* fix: these three device buffers were leaked */
    cudaFree(d_random_number);
    cudaFree(d_flush);
    cudaFree(d_flush2);
    return 0;
}
the_stack
#include <gtest/gtest.h> #include <random> #include <utility> #include "dali/kernels/reduce/mean_stddev_gpu_impl.cuh" #include "dali/kernels/scratch.h" #include "dali/test/test_tensors.h" #include "dali/test/tensor_test_utils.h" #include "dali/core/tensor_shape_print.h" #include "dali/kernels/reduce/reduce_test.h" #include "dali/kernels/reduce/reduce_gpu_test.h" namespace dali { namespace kernels { namespace reduce_impl { template <typename Out, typename In, typename Mean> void CenterAndSquare(const OutTensorCPU<Out> &out, const InTensorCPU<In> &in, const InTensorCPU<Mean> mean, TensorShape<> &in_pos, TensorShape<> &mean_pos, int dim = 0) { int extent = in.shape[dim]; int dj = mean.shape[dim] > 1 ? 1 : 0; if (dim == in.dim() - 1) { const Mean *mean_ptr = mean(mean_pos); const In *in_ptr = in(in_pos); Out *out_ptr = out(in_pos); for (int i = 0, j = 0; i < extent; i++, j += dj) { double d = in_ptr[i] - mean_ptr[j]; out_ptr[i] = static_cast<Out>(d * d); } } else { for (int i = 0, j = 0; i < extent; i++, j += dj) { in_pos[dim] = i; mean_pos[dim] = j; CenterAndSquare(out, in, mean, in_pos, mean_pos, dim + 1); } } } template <typename Out = float, typename In, typename Mean> TestTensorList<Out> CenterAndSquare(const InListCPU<In> &in, const InListCPU<Mean> &mean) { TestTensorList<Out> out_tl; out_tl.reshape(in.shape); auto out = out_tl.cpu(); int N = in.num_samples(); for (int i = 0; i < N; i++) { auto in_tv = in[i]; auto out_tv = out[i]; auto mean_tv = mean.num_samples() > 1 ? 
mean[i] : mean[0]; TensorShape<> in_pos, mean_pos; in_pos.resize(in_tv.shape.size()); mean_pos.resize(in_tv.shape.size()); CenterAndSquare(out_tv, in_tv, mean_tv, in_pos, mean_pos); } return out_tl; } template <typename Out = float, typename In, typename Mean> TestTensorList<Out> RefStdDev(const TensorListView<StorageCPU, In> &in, const TensorListView<StorageCPU, Mean> &mean, int ddof = 0, double reg = 0, bool inv = false) { SmallVector<int, 6> axes; for (int d = 0; d < mean.sample_dim(); d++) { for (int i = 0; i < mean.num_samples(); i++) { if (mean.tensor_shape_span(i)[d] > 1) goto non_reduced; } axes.push_back(d); non_reduced:; // NOLINT } bool reduce_batch = mean.num_samples() == 1 && in.num_samples() > 1; using tmp_t = decltype(In() - Mean()); auto centered_squared = CenterAndSquare<tmp_t, In, Mean>(in, mean); auto centered_squared_cpu = centered_squared.cpu(); TestTensorList<Out> reduced_samples; const auto &out_shape = mean.shape; int N = in.num_samples(); if (reduce_batch) { assert(is_uniform(out_shape)); TensorListShape<> reduced_sample_shapes = uniform_list_shape(in.num_samples(), mean.shape[0]); reduced_samples.reshape(reduced_sample_shapes); auto reduced_samples_cpu = reduced_samples.cpu(); for (int i = 0; i < N; i++) { RefReduce(reduced_samples_cpu[i], centered_squared_cpu[i], make_span(axes), true, reductions::sum()); } TestTensorList<Out> out_tl; out_tl.reshape(out_shape); auto out = out_tl.cpu(); int64_t n = out_shape.num_elements(); double ratio = in.num_elements() / n - ddof; for (int j = 0; j < n; j++) { double sum = 0; for (int i = 0; i < N; i++) sum += reduced_samples_cpu.data[i][j]; out.data[0][j] = inv ? 
rsqrt(sum / ratio + reg) : std::sqrt(sum / ratio + reg); } return out_tl; } else { reduced_samples.reshape(out_shape); auto out = reduced_samples.cpu(); for (int i = 0; i < N; i++) { int64_t n = out[i].num_elements(); int64_t n_in = in[i].num_elements(); double ratio = n_in / n - ddof; RefReduce(out[i], centered_squared_cpu[i], make_span(axes), true, reductions::sum()); for (int j = 0; j < n; j++) { double x = out.data[i][j] / ratio + reg; out.data[i][j] = inv ? rsqrt(x) : std::sqrt(x); } } return reduced_samples; } } template <typename Acc, typename Out, typename In> void RefMean(const TensorListView<StorageCPU, Out> &out, const TensorListView<StorageCPU, In> &in, span<const int> axes, bool keep_dims, bool batch) { TestTensorList<Acc> sum; TensorListShape<> out_shape; CalculateReducedShape(out_shape, in.shape, axes, keep_dims, batch); sum.reshape(out_shape); auto sum_cpu = sum.cpu(); RefReduce(sum_cpu, in, axes, keep_dims, batch, reductions::sum()); assert(out.shape == out_shape); if (batch) { int64_t nin = in.num_elements(); int64_t nout = out.num_elements(); double ratio = nin / nout; // should be an integer, no cast required auto *optr = out.data[0]; auto *sptr = sum_cpu.data[0]; for (int i = 0; i < nout; i++) optr[i] = sptr[i] / ratio; } else { for (int s = 0; s < in.num_samples(); s++) { auto in_tv = in[s]; auto out_tv = out[s]; auto sum_tv = sum_cpu[s]; int64_t nin = in_tv.num_elements(); int64_t nout = out_tv.num_elements(); double ratio = nin / nout; // should be an integer, no cast required auto *optr = out_tv.data; auto *sptr = sum_tv.data; for (int i = 0; i < nout; i++) optr[i] = sptr[i] / ratio; } } } TEST(MeanImplGPU, SplitStage) { TensorListShape<> in_shape = {{ { 32, 2, 64000 }, { 15, 4, 128000 }, { 72000, 1, 7 } }}; TensorListShape<> ref_out_shape = {{ { 1, 2, 1 }, { 1, 4, 1 }, { 1, 1, 1 } }}; int axes[] = { 0, 2 }; testing::ReductionKernelTest<MeanImplGPU<float, uint8_t, uint64_t>, float, uint8_t> test; for (int iter = 0; iter < 3; iter++) { 
test.Setup(in_shape, ref_out_shape, make_span(axes), true, false); EXPECT_GE(test.kernel.GetNumStages(), 4); // both reduced axes must be split test.FillData(0, 255); test.Run(); RefMean<int64_t>(test.ref.cpu(), test.in.cpu(), make_span(axes), true, false); test.Check(EqualEpsRel(1e-5, 1e-6)); } } TEST(MeanImplGPU, BatchMean) { TensorListShape<> in_shape = {{ { 32, 3, 64000 }, { 15, 3, 128000 }, { 72000, 3, 7 } }}; TensorListShape<> ref_out_shape = {{ TensorShape<>{3} }}; int axes[] = { 0, 2 }; testing::ReductionKernelTest<MeanImplGPU<float, uint8_t, uint64_t>, float, uint8_t> test; for (int iter = 0; iter < 3; iter++) { test.Setup(in_shape, ref_out_shape, make_span(axes), false, true); EXPECT_GE(test.kernel.GetNumStages(), 4); // both reduced axes must be split test.FillData(0, 255); test.Run(); RefMean<int64_t>(test.ref.cpu(), test.in.cpu(), make_span(axes), false, true); test.Check(EqualEpsRel(1e-5, 1e-6)); } } TEST(StdDevImplGPU, Outer_Inner_SplitStage) { TensorListShape<> in_shape = {{ { 32, 2, 64000 }, { 15, 4, 128000 }, { 72000, 1, 7 } }}; TensorListShape<> ref_out_shape = {{ { 1, 2, 1 }, { 1, 4, 1 }, { 1, 1, 1 } }}; int axes[] = { 0, 2 }; TestTensorList<float> fake_mean; fake_mean.reshape(ref_out_shape); auto mean_cpu = fake_mean.cpu(); *mean_cpu[0](0, 0, 0) = 10; *mean_cpu[0](0, 1, 0) = 20; *mean_cpu[1](0, 0, 0) = 30; *mean_cpu[1](0, 1, 0) = 40; *mean_cpu[1](0, 2, 0) = 50; *mean_cpu[1](0, 3, 0) = 60; *mean_cpu[2](0, 0, 0) = 70; testing::ReductionKernelTest<StdDevImplGPU<float, int16_t>, float, int16_t> test; for (int iter = 0; iter < 3; iter++) { test.Setup(in_shape, ref_out_shape, make_span(axes), true, false); EXPECT_GE(test.kernel.GetNumStages(), 4); // both reduced axes must be split test.FillData(-100, 100); test.Run(fake_mean.gpu()); test.ref = RefStdDev(test.in.cpu(), mean_cpu); test.Check(EqualEpsRel(1e-5, 1e-6)); } } TEST(StdDevImplGPU, Middle_Inner_Sample) { TensorListShape<> in_shape = {{ { 4, 32, 1, 6400 }, { 3, 15, 2, 12800 }, { 2, 7200, 3, 7 
} }}; TensorListShape<> ref_out_shape = {{ { 4, 1, 1, 1 }, { 3, 1, 2, 1 }, { 2, 1, 3, 1 } }}; int axes[] = { 1, 3 }; TestTensorList<float> fake_mean; fake_mean.reshape(ref_out_shape); auto mean_cpu = fake_mean.cpu(); for (int i = 0, n = mean_cpu.num_elements(); i < n; i++) { mean_cpu.data[0][i] = 10 * (i+1); } testing::ReductionKernelTest<StdDevImplGPU<float, int16_t>, float, int16_t> test; for (int iter = 0; iter < 3; iter++) { test.Setup(in_shape, ref_out_shape, make_span(axes), true, false); EXPECT_GE(test.kernel.GetNumStages(), 2); // both reduced axes must be split test.FillData(-100, 100); test.Run(fake_mean.gpu()); test.ref = RefStdDev(test.in.cpu(), mean_cpu); test.Check(EqualEpsRel(1e-5, 1e-6)); } } TEST(StdDevImplGPU, Middle_Inner_Batch) { TensorListShape<> in_shape = {{ { 2, 32, 3, 6400 }, { 2, 15, 3, 12800 }, { 2, 7200, 3, 7 } }}; TensorListShape<> ref_out_shape = {{ { 2, 1, 3, 1 } }}; int axes[] = { 1, 3 }; TestTensorList<float> fake_mean; fake_mean.reshape(ref_out_shape); auto mean_cpu = fake_mean.cpu(); *mean_cpu[0](0, 0, 0, 0) = 10; *mean_cpu[0](0, 0, 1, 0) = 20; *mean_cpu[0](0, 0, 2, 0) = 30; *mean_cpu[0](1, 0, 0, 0) = 40; *mean_cpu[0](1, 0, 1, 0) = 50; *mean_cpu[0](1, 0, 2, 0) = 60; testing::ReductionKernelTest<StdDevImplGPU<float, int16_t>, float, int16_t> test; for (int iter = 0; iter < 3; iter++) { test.Setup(in_shape, ref_out_shape, make_span(axes), true, true); EXPECT_GE(test.kernel.GetNumStages(), 2); // both reduced axes must be split test.FillData(-100, 100); test.Run(fake_mean.gpu()); test.ref = RefStdDev(test.in.cpu(), mean_cpu); test.Check(EqualEpsRel(1e-5, 1e-6)); } } TEST(InvStdDevImplGPU, Outer_Batch_Regularized) { TensorListShape<> in_shape = {{ { 480, 640, 3 }, { 720, 1280, 3 }, { 1080, 1920, 3 } }}; TensorListShape<> ref_out_shape = {{ { 1, 1, 3 } }}; int axes[] = { 0, 1 }; TestTensorList<float> fake_mean; fake_mean.reshape(ref_out_shape); auto mean_cpu = fake_mean.cpu(); for (int i = 0, n = mean_cpu.num_elements(); i < n; i++) { 
mean_cpu.data[0][i] = 10 * (i+1); } testing::ReductionKernelTest<InvStdDevImplGPU<float, int16_t>, float, int16_t> test; for (int iter = 0; iter < 3; iter++) { test.Setup(in_shape, ref_out_shape, make_span(axes), true, true); EXPECT_GE(test.kernel.GetNumStages(), 2); // both reduced axes must be split test.FillData(-100, 100); test.Run(fake_mean.gpu(), 1, 12000); test.ref = RefStdDev(test.in.cpu(), mean_cpu, 1, 12000, true); test.Check(EqualEpsRel(1e-5, 1e-6)); } } } // namespace reduce_impl } // namespace kernels } // namespace dali
the_stack
#define DEGREE 6
#include <types.h>
#include <cutil.h>
#include <error.h>
#include <cusp/print.h>

// Gauss quadrature abscissae (z) and weights (w) for the DEGREE-point
// tensor-product rule. Filled from the host with cudaMemcpyToSymbol in
// perform_element_loop_2d / perform_element_loop_2d_coo before any kernel
// that reads them is launched.
__device__ __constant__ CGType c_z_x[DEGREE];
__device__ __constant__ CGType c_z_y[DEGREE];
__device__ __constant__ CGType c_w_x[DEGREE];
__device__ __constant__ CGType c_w_y[DEGREE];

// Right-hand-side forcing term f(x, y) of the PDE. Currently the zero
// function (homogeneous problem); edit here to solve for a different source.
template<typename ValueType>
__device__ __host__ ValueType forceFunction(ValueType x, ValueType y)
{
  return 0.0;
}

// Computes the 6 packed upper-triangle entries of the symmetric 3x3 element
// stiffness matrix for a linear triangular element:
//   stiffMat order: (0,0),(0,1),(0,2),(1,1),(1,2),(2,2)
// linearBaseCoeff holds three consecutive (a, b, c) triples, one per linear
// basis function phi_k(x, y) = a_k*x + b_k*y + c_k; the entry is
// grad(phi_k).grad(phi_g) * TArea since the gradients are constant.
template<typename IndexType, typename ValueType>
__device__ void compute_stiffness_matrix(const ValueType* __restrict__ linearBaseCoeff,
                                         ValueType TArea,
                                         ValueType* __restrict__ stiffMat)
{
  int cnt = 0;
#pragma unroll
  for (int k = 0; k < 3; k++) {
#pragma unroll
    for (int g = k; g < 3; g++) {
      ValueType a1 = linearBaseCoeff[3 * k + 0];
      ValueType b1 = linearBaseCoeff[3 * k + 1];
      ValueType a2 = linearBaseCoeff[3 * g + 0];
      ValueType b2 = linearBaseCoeff[3 * g + 1];
      stiffMat[cnt++] = (a1 * a2 + b1 * b2) * TArea;
    }
  }
}

// Tensor-product Gauss quadrature of a function sampled on the DEGREE x DEGREE
// reference grid: sum_i w_x[i] * (sum_j w_y[j] * fx[i][j]).
template<typename ValueType>
__device__ ValueType Integration_Quadrilateral(ValueType (*fx)[DEGREE])
{
  ValueType integral = 0;
#pragma unroll
  for (int i = 0; i < DEGREE; i++) {
    ValueType tmp_y = 0.0;
#pragma unroll
    for (int j = 0; j < DEGREE; j++) {
      tmp_y += fx[i][j] * c_w_y[j];
    }
    integral += tmp_y * c_w_x[i];
  }
  return integral;
}

// Computes the packed upper-triangle (6 entries, same order as the stiffness
// matrix) of the element mass matrix, and the 3-entry element load vector
// ele_b, by mapping the triangle to a collapsed reference quadrilateral and
// integrating with the constant-memory quadrature rule.
template<typename IndexType, typename ValueType>
__device__ void compute_massmatrix_vector(ValueType* __restrict__ vertX,
                                          ValueType* __restrict__ vertY,
                                          ValueType* __restrict__ linearBaseCoeff,
                                          ValueType* __restrict__ massMat,
                                          ValueType* __restrict__ ele_b)
{
  // Physical coordinates of the quadrature points (degenerate quad map:
  // the third vertex absorbs the whole upper edge of the reference square).
  ValueType x[DEGREE][DEGREE];
  ValueType y[DEGREE][DEGREE];
#pragma unroll
  for (int m = 0; m < DEGREE; m++) {
#pragma unroll
    for (int j = 0; j < DEGREE; j++) {
      x[m][j] = vertX[0] * (1 - c_z_x[m]) * 0.5 * (1 - c_z_y[j]) * 0.5
              + vertX[1] * (1 + c_z_x[m]) * 0.5 * (1 - c_z_y[j]) * 0.5
              + vertX[2] * (1 + c_z_y[j]) * 0.5;
      y[m][j] = vertY[0] * (1 - c_z_x[m]) * 0.5 * (1 - c_z_y[j]) * 0.5
              + vertY[1] * (1 + c_z_x[m]) * 0.5 * (1 - c_z_y[j]) * 0.5
              + vertY[2] * (1 + c_z_y[j]) * 0.5;
    }
  }

  ValueType a1, b1, c1, a2, b2, c2;
  ValueType integrandMass[DEGREE][DEGREE];
  int Cnt = 0;
  // Jacobian of the triangle->reference-square map (constant per element).
  ValueType jacobi = (vertX[0] * vertY[1] - vertX[1] * vertY[0]
                    - vertX[0] * vertY[2] + vertX[2] * vertY[0]
                    + vertX[1] * vertY[2] - vertX[2] * vertY[1]) / 8;

  // Mass entries: integral of phi_k * phi_g over the element.
#pragma unroll
  for (int k = 0; k < 3; k++) {
#pragma unroll
    for (int g = k; g < 3; g++) {
      a1 = linearBaseCoeff[3 * k + 0];
      b1 = linearBaseCoeff[3 * k + 1];
      c1 = linearBaseCoeff[3 * k + 2];
      a2 = linearBaseCoeff[3 * g + 0];
      b2 = linearBaseCoeff[3 * g + 1];
      c2 = linearBaseCoeff[3 * g + 2];
#pragma unroll
      for (int p = 0; p < DEGREE; p++) {
#pragma unroll
        for (int q = 0; q < DEGREE; q++) {
          integrandMass[p][q] = (a1 * x[p][q] + b1 * y[p][q] + c1)
                              * (a2 * x[p][q] + b2 * y[p][q] + c2) * jacobi;
        }
      }
      ValueType integralMass = Integration_Quadrilateral<ValueType>(integrandMass);
      massMat[Cnt++] = integralMass;
    }
  }

  // Load vector: integral of f * phi_k. Reuses integrandMass as scratch to
  // keep register/local-memory pressure down.
  ValueType (*integrandForce)[DEGREE] = integrandMass;
  Cnt = 0;
#pragma unroll
  for (int k = 0; k < 3; k++) {
    a1 = linearBaseCoeff[3 * k + 0];
    b1 = linearBaseCoeff[3 * k + 1];
    c1 = linearBaseCoeff[3 * k + 2];
#pragma unroll
    for (int p = 0; p < DEGREE; p++) {
#pragma unroll
      for (int q = 0; q < DEGREE; q++) {
        ValueType f = forceFunction<ValueType>(x[p][q], y[p][q]);
        integrandForce[p][q] = f * (a1 * x[p][q] + b1 * y[p][q] + c1) * jacobi;
      }
    }
    ValueType integralForce = Integration_Quadrilateral<ValueType>(integrandForce);
    ele_b[Cnt++] = integralForce;
  }
}

// Binary search over a strided (ELL-pitched) column list. Comparisons are
// done on the *unsigned* reinterpretation of the indices (via the project's
// intuint union) so that any negative "empty slot" sentinels sort to the top
// end. Returns the slot index, or -1 if _val is not present.
template <typename IndexType>
__device__ int binarySearch(IndexType *indices, IndexType low, IndexType high,
                            IndexType _val, const IndexType pitch)
{
  IndexType retval = -1;
  intuint<IndexType> val;
  val.ival = _val;
  while (high >= low) {
    IndexType mid = low + (high - low) / 2;
    intuint<IndexType> mval;
    mval.ival = indices[pitch * mid];
    if (mval.uval > val.uval)
      high = mid - 1;
    else if (mval.uval < val.uval)
      low = mid + 1;
    else {
      retval = mid;
      break;
    }
  }
  return retval;
}

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 600
// CAS-loop emulation of double-precision atomicAdd for pre-Pascal devices.
// FIX: guarded by __CUDA_ARCH__ < 600 — CUDA ships a native
// atomicAdd(double*, double) on SM60+, and defining this overload
// unconditionally breaks compilation for those architectures.
__device__ double atomicAdd(double* address, double val)
{
  unsigned long long int *address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif

// Atomically adds `coef` into A(row, col) of the pitched ELL matrix.
// Slot 0 of each row is reserved for the diagonal, so the search starts at 1.
// Silently drops the contribution if `col` is not in the row's pattern
// (matches the original behavior).
template<typename IndexType, typename ValueType>
__device__ __forceinline__ void add_offdiag_entry(IndexType row, IndexType col, ValueType coef,
                                                  ValueType* __restrict__ d_ellvalues,
                                                  IndexType* __restrict__ d_ellcolidx,
                                                  size_t num_col_per_row, size_t pitch)
{
  IndexType* mat_row_cols = &d_ellcolidx[row];
  ValueType* mat_row_coefs = &d_ellvalues[row];
  IndexType loc = binarySearch<IndexType>(mat_row_cols, 1, num_col_per_row - 1, col, pitch);
  if (loc >= 0) {
    atomicAdd(&mat_row_coefs[pitch * loc], coef);
  }
}

// Scatters one element's (stiffness + lambda * mass) contributions and load
// vector into the global ELL system with atomics. The 6 packed-upper-triangle
// entries map as: 0->(0,0), 1->(0,1), 2->(0,2), 3->(1,1), 4->(1,2), 5->(2,2);
// off-diagonals are written symmetrically. (Copy-pasted scatter bodies were
// factored into add_offdiag_entry; behavior is unchanged.)
template<typename IndexType, typename ValueType>
__device__ void sum_into_global_linear_system_cuda(IndexType* __restrict__ ids,
                                                   ValueType* __restrict__ stiffMat,
                                                   ValueType* __restrict__ massMat,
                                                   ValueType* __restrict__ ele_b,
                                                   ValueType* __restrict__ d_ellvalues,
                                                   IndexType* __restrict__ d_ellcolidx,
                                                   size_t nrow, size_t num_col_per_row,
                                                   size_t pitch,
                                                   ValueType* __restrict__ d_b)
{
  // lambda weights the mass matrix against the stiffness matrix
  // (presumably the Helmholtz/reaction coefficient — fixed at 1 here).
  ValueType lambda = 1.0;

  // Off-diagonal couplings, each added at (i,j) and (j,i).
  ValueType coef = stiffMat[1] + lambda * massMat[1];
  add_offdiag_entry(ids[0], ids[1], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);
  add_offdiag_entry(ids[1], ids[0], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);

  coef = stiffMat[2] + lambda * massMat[2];
  add_offdiag_entry(ids[0], ids[2], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);
  add_offdiag_entry(ids[2], ids[0], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);

  coef = stiffMat[4] + lambda * massMat[4];
  add_offdiag_entry(ids[1], ids[2], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);
  add_offdiag_entry(ids[2], ids[1], coef, d_ellvalues, d_ellcolidx, num_col_per_row, pitch);

  // Diagonal entries live in ELL slot 0 of their own row.
  atomicAdd(&d_ellvalues[ids[0]], stiffMat[0] + lambda * massMat[0]);
  atomicAdd(&d_ellvalues[ids[1]], stiffMat[3] + lambda * massMat[3]);
  atomicAdd(&d_ellvalues[ids[2]], stiffMat[5] + lambda * massMat[5]);

  // Right-hand-side vector.
  atomicAdd(&d_b[ids[0]], ele_b[0]);
  atomicAdd(&d_b[ids[1]], ele_b[1]);
  atomicAdd(&d_b[ids[2]], ele_b[2]);
}

// One thread per triangle (grid-stride loop): builds the linear basis
// coefficients by inverting the 3x3 vertex matrix analytically, computes the
// element stiffness/mass/load, and scatters them into the global ELL system.
template<typename IndexType, typename ValueType>
__global__ void element_loop_kernel(size_t nv, ValueType *d_nx, ValueType *d_ny,
                                    size_t ne, IndexType *d_tri0, IndexType *d_tri1,
                                    IndexType *d_tri2,
                                    ValueType *d_ellvalues, IndexType *d_ellcolidx,
                                    size_t nrow, size_t num_col_per_row, size_t pitch,
                                    ValueType *d_b)
{
  ValueType coeffs[9];
  ValueType stiffMat[6];
  ValueType massMat[6];
  ValueType ele_b[3];
  IndexType ids[3];
  ValueType x[3];
  ValueType y[3];

  for (int eleidx = blockIdx.x * blockDim.x + threadIdx.x; eleidx < ne;
       eleidx += blockDim.x * gridDim.x) {
    ids[0] = d_tri0[eleidx];
    ids[1] = d_tri1[eleidx];
    ids[2] = d_tri2[eleidx];
    x[0] = d_nx[ids[0]];
    x[1] = d_nx[ids[1]];
    x[2] = d_nx[ids[2]];
    y[0] = d_ny[ids[0]];
    y[1] = d_ny[ids[1]];
    y[2] = d_ny[ids[2]];

    // Triangle area via the cross-product formula.
    ValueType TArea = fabs(x[0] * y[2] - x[0] * y[1] + x[1] * y[0]
                         - x[1] * y[2] + x[2] * y[1] - x[2] * y[0]) / 2.0;

    // Vertex matrix A = [[x0 y0 1],[x1 y1 1],[x2 y2 1]]; the columns of
    // A^{-1} are the (a,b,c) coefficients of the three linear basis functions.
    ValueType a11 = x[0], a12 = y[0], a13 = 1.0,
              a21 = x[1], a22 = y[1], a23 = 1.0,
              a31 = x[2], a32 = y[2], a33 = 1.0;
    ValueType det = a11 * a22 * a33 + a21 * a32 * a13 + a31 * a12 * a23
                  - a11 * a32 * a23 - a31 * a22 * a13 - a21 * a12 * a33;
    if (det == 0.0) {
      // Degenerate (zero-area) triangle: report and fall through; the
      // divisions below will produce inf/nan for this element.
      printf("det == 0 : %d\n", eleidx);
    }

    // Adjugate entries (cofactor transpose), then A^{-1} = adj(A)/det.
    ValueType b11 = a22 * a33 - a23 * a32;
    ValueType b12 = a13 * a32 - a12 * a33;
    ValueType b13 = a12 * a23 - a13 * a22;
    ValueType b21 = a23 * a31 - a21 * a33;
    ValueType b22 = a11 * a33 - a13 * a31;
    ValueType b23 = a13 * a21 - a11 * a23;
    ValueType b31 = a21 * a32 - a22 * a31;
    ValueType b32 = a12 * a31 - a11 * a32;
    ValueType b33 = a11 * a22 - a12 * a21;
    coeffs[0] = b11 / det;
    coeffs[1] = b21 / det;
    coeffs[2] = b31 / det;
    coeffs[3] = b12 / det;
    coeffs[4] = b22 / det;
    coeffs[5] = b32 / det;
    coeffs[6] = b13 / det;
    coeffs[7] = b23 / det;
    coeffs[8] = b33 / det;

    // Element stiffness matrix.
    compute_stiffness_matrix<IndexType, ValueType>(coeffs, TArea, stiffMat);
    // Element mass matrix and load vector.
    compute_massmatrix_vector<IndexType, ValueType>(x, y, coeffs, massMat, ele_b);
    // Scatter into the global system.
    sum_into_global_linear_system_cuda<IndexType, ValueType>(ids, stiffMat, massMat, ele_b,
                                                             d_ellvalues, d_ellcolidx,
                                                             nrow, num_col_per_row, pitch, d_b);
  }
}

// NOTE(review): this kernel is an empty stub — the COO triples and d_b are
// never written, so perform_element_loop_2d_coo currently assembles garbage.
// Left unimplemented here (implementing it would be new behavior, not a fix);
// flagged so callers know the COO path is non-functional.
template<typename IndexType, typename ValueType>
__global__ void element_loop_coo_kernel(size_t nv, ValueType *d_nx, ValueType *d_ny,
                                        size_t ne, IndexType *d_tri0, IndexType *d_tri1,
                                        IndexType *d_tri2,
                                        IndexType *coorowidx, IndexType *coocolidx,
                                        ValueType *coovalues, ValueType *d_b)
{
}

// Host driver for the ELL assembly path: uploads the quadrature rule to
// constant memory, then runs element_loop_kernel over all triangles.
// A must already carry the sparsity pattern (column_indices) of the mesh.
void perform_element_loop_2d(Vector_d_CG &nx, Vector_d_CG &ny,
                             IdxVector_d &tri0, IdxVector_d &tri1, IdxVector_d &tri2,
                             Matrix_ell_d_CG &A, Vector_d_CG &b,
                             Vector_h_CG &z_x, Vector_h_CG &z_y,
                             Vector_h_CG &weight_x, Vector_h_CG &weight_y)
{
  typedef typename Matrix_ell_d_CG::index_type IndexType;
  typedef typename Matrix_ell_d_CG::value_type ValueType;

  int nv = nx.size();
  int ne = tri0.size();

  ValueType *d_b = thrust::raw_pointer_cast(&b[0]);
  ValueType *d_nx = thrust::raw_pointer_cast(&nx[0]);
  ValueType *d_ny = thrust::raw_pointer_cast(&ny[0]);
  IndexType *d_tri0 = thrust::raw_pointer_cast(&tri0[0]);
  IndexType *d_tri1 = thrust::raw_pointer_cast(&tri1[0]);
  IndexType *d_tri2 = thrust::raw_pointer_cast(&tri2[0]);
  ValueType *d_ellvalues = thrust::raw_pointer_cast(&A.values.values[0]);
  IndexType *d_ellcolidx = thrust::raw_pointer_cast(&A.column_indices.values[0]);
  ValueType *zx = thrust::raw_pointer_cast(&z_x[0]);
  ValueType *zy = thrust::raw_pointer_cast(&z_y[0]);
  ValueType *wx = thrust::raw_pointer_cast(&weight_x[0]);
  ValueType *wy = thrust::raw_pointer_cast(&weight_y[0]);

  size_t num_col_per_row = A.column_indices.num_cols;
  size_t pitch = A.column_indices.pitch;
  size_t nrow = A.num_rows;

  // Upload quadrature abscissae/weights into constant memory.
  cudaSafeCall(cudaMemcpyToSymbol(c_z_x, zx, sizeof(ValueType) * z_x.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_z_y, zy, sizeof(ValueType) * z_y.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_w_x, wx, sizeof(ValueType) * weight_x.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_w_y, wy, sizeof(ValueType) * weight_y.size(), 0, cudaMemcpyHostToDevice));

  int threads = 256;
  int num_blocks = std::min((int)ceil((double)ne / threads), 65535); // 32 blocks per SM

  // FIX: cudaThreadSetCacheConfig is deprecated; use the device-level call.
  cudaSafeCall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
  //cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);

  // The actual finite-element assembly loop.
  element_loop_kernel<IndexType, ValueType><<<num_blocks, threads>>>(
      nv, d_nx, d_ny, ne, d_tri0, d_tri1, d_tri2,
      d_ellvalues, d_ellcolidx, nrow, num_col_per_row, pitch, d_b);
  // FIX: surface launch-configuration errors (kernel launches don't return them).
  cudaSafeCall(cudaGetLastError());
}
// Compacts sorted COO duplicates into CSR values. One thread per vertex
// (matrix row), grid-stride. For row vidx, [vert_indices[vidx],
// vert_indices[vidx+1]) is the row's segment of the row-and-column-sorted COO
// arrays; runs of equal column indices are accumulated into consecutive CSR
// slots starting at csr_row_offsets[vidx]. csr_values is accumulated with +=,
// so it must be zero-initialized by the caller.
// Precondition: the CSR pattern of the destination matches the distinct
// columns of each segment (cnt never exceeds the row's CSR width).
template<typename IndexType, typename ValueType>
__global__ void assemble2csr_kernel(const IndexType* __restrict__ column_indices,
                                    const ValueType* __restrict__ values,
                                    const IndexType* __restrict__ vert_indices,
                                    const IndexType* __restrict__ csr_row_offsets,
                                    ValueType* __restrict__ csr_values,
                                    int nv)
{
  for (int vidx = blockIdx.x * blockDim.x + threadIdx.x; vidx < nv;
       vidx += gridDim.x * blockDim.x) {
    int start = vert_indices[vidx];
    int end = vert_indices[vidx + 1];
    // FIX: guard empty segments — the unconditional read of values[start]
    // below was out-of-bounds (or belonged to the next row) when start == end.
    if (start >= end) {
      continue;
    }
    int rowstart = csr_row_offsets[vidx];
    int cnt = 0;
    csr_values[rowstart] += values[start];
    for (int i = start + 1; i < end; i++) {
      ValueType v = values[i];
      if (column_indices[i] == column_indices[i - 1]) {
        // Same column as the previous triple: accumulate into the same slot.
        csr_values[rowstart + cnt] += v;
      } else {
        // New column: advance to the next CSR slot of this row.
        cnt++;
        csr_values[rowstart + cnt] += v;
      }
    }
  }
}

// Host driver for the COO assembly path: runs the (currently stubbed, see
// element_loop_coo_kernel) per-element COO kernel, sorts the triples, builds
// per-row segment offsets with thrust, and compacts duplicates into A's CSR
// values with assemble2csr_kernel.
// NOTE(review): the offset construction assumes every row owns at least one
// COO triple (reduce_by_key yields exactly nv segments) — verify for meshes
// with isolated vertices.
void perform_element_loop_2d_coo(Vector_d_CG &nx, Vector_d_CG &ny,
                                 IdxVector_d &tri0, IdxVector_d &tri1, IdxVector_d &tri2,
                                 Matrix_d_CG &A, Vector_d_CG &b,
                                 Vector_h_CG &z_x, Vector_h_CG &z_y,
                                 Vector_h_CG &weight_x, Vector_h_CG &weight_y)
{
  typedef typename Matrix_d_CG::index_type IndexType;
  typedef typename Matrix_d_CG::value_type ValueType;

  int nv = nx.size();
  int ne = tri0.size();
  // 6 COO triples per triangle (diagonal + symmetric off-diagonal entries).
  Matrix_coo_d_CG Aout(nv, nv, 6 * ne);

  ValueType *d_b = thrust::raw_pointer_cast(&b[0]);
  ValueType *d_nx = thrust::raw_pointer_cast(&nx[0]);
  ValueType *d_ny = thrust::raw_pointer_cast(&ny[0]);
  IndexType *d_tri0 = thrust::raw_pointer_cast(&tri0[0]);
  IndexType *d_tri1 = thrust::raw_pointer_cast(&tri1[0]);
  IndexType *d_tri2 = thrust::raw_pointer_cast(&tri2[0]);
  IndexType *d_coorowidx = thrust::raw_pointer_cast(&Aout.row_indices[0]);
  IndexType *d_coocolidx = thrust::raw_pointer_cast(&Aout.column_indices[0]);
  ValueType *d_coovalues = thrust::raw_pointer_cast(&Aout.values[0]);
  ValueType *zx = thrust::raw_pointer_cast(&z_x[0]);
  ValueType *zy = thrust::raw_pointer_cast(&z_y[0]);
  ValueType *wx = thrust::raw_pointer_cast(&weight_x[0]);
  ValueType *wy = thrust::raw_pointer_cast(&weight_y[0]);

  // Upload quadrature abscissae/weights into constant memory.
  cudaSafeCall(cudaMemcpyToSymbol(c_z_x, zx, sizeof(ValueType) * z_x.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_z_y, zy, sizeof(ValueType) * z_y.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_w_x, wx, sizeof(ValueType) * weight_x.size(), 0, cudaMemcpyHostToDevice));
  cudaSafeCall(cudaMemcpyToSymbol(c_w_y, wy, sizeof(ValueType) * weight_y.size(), 0, cudaMemcpyHostToDevice));

  int threads = 256;
  int num_blocks = std::min((int)ceil((double)ne / threads), 65535); // 32 blocks per SM

  // FIX: cudaThreadSetCacheConfig is deprecated; use the device-level call.
  // cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
  cudaSafeCall(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));

  // The actual finite-element assembly loop (kernel body is currently empty).
  element_loop_coo_kernel<IndexType, ValueType><<<num_blocks, threads>>>(
      nv, d_nx, d_ny, ne, d_tri0, d_tri1, d_tri2,
      d_coorowidx, d_coocolidx, d_coovalues, d_b);
  cudaSafeCall(cudaGetLastError()); // FIX: surface launch errors

  Aout.sort_by_row_and_column();
  // cusp::print(Aout);

  // Per-row triple counts via reduce_by_key (flags are all 1), then an
  // inclusive scan turns counts into segment offsets: keyoutput becomes
  // [0, c0, c0+c1, ...] and serves as vert_indices for the compaction kernel.
  cusp::array1d<int, cusp::device_memory> flags(6 * ne, 1);
  cusp::array1d<int, cusp::device_memory> keyoutput(nv + 1);
  cusp::array1d<int, cusp::device_memory> valoutput(nv);
  // (Removed four unused raw_pointer_cast locals and the redundant
  // keyoutput.resize(nv + 1) — it was constructed at that size already.)
  thrust::reduce_by_key(Aout.row_indices.begin(), Aout.row_indices.end(),
                        flags.begin(), keyoutput.begin(), valoutput.begin());
  keyoutput[0] = 0;
  thrust::inclusive_scan(valoutput.begin(), valoutput.end(), keyoutput.begin() + 1);

  num_blocks = std::min((int)ceil((double)nv / threads), 65535);
  assemble2csr_kernel<IndexType, ValueType><<<num_blocks, threads>>>(
      thrust::raw_pointer_cast(&Aout.column_indices[0]),
      thrust::raw_pointer_cast(&Aout.values[0]),
      thrust::raw_pointer_cast(&keyoutput[0]),
      thrust::raw_pointer_cast(&A.row_offsets[0]),
      thrust::raw_pointer_cast(&A.values[0]),
      nv);
  cudaSafeCall(cudaGetLastError()); // FIX: surface launch errors

  // Release the temporaries eagerly.
  Aout.resize(0, 0, 0);
  flags.resize(0);
  keyoutput.resize(0);
  valoutput.resize(0);
}
#endif
the_stack
namespace RPU {

/******************************************************************************************/
/* PulsedRPUDeviceCuda

   Base class which maintains the basic hard bounds and dw_min up/down and
   decays etc for the pulsed updates. Note that it is still abstract: derived
   classes must implement getUpdateKernels.
*/

template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(CudaContext *c, int x_size, int d_size)
    : PulsedRPUDeviceCudaBase<T>(c, x_size, d_size){};

// Allocates the always-present device buffers (the per-weight 4-parameter
// pack and the decay scales) and clears the optional ones, which are
// allocated lazily only when populateFrom finds non-trivial values.
template <typename T> void PulsedRPUDeviceCuda<T>::initialize() {
  dev_4params_ = RPU::make_unique<CudaArray<float>>(this->context_, 4 * this->size_);
  dev_decay_scale_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
  dev_diffusion_rate_ = nullptr; // on the fly
  dev_reset_bias_ = nullptr;
  dev_persistent_weights_ = nullptr;
  this->context_->synchronize();
};

// Deep copy: re-allocates this device's buffers and copies the other's
// contents; optional buffers are copied only if the source has them.
template <typename T>
PulsedRPUDeviceCuda<T>::PulsedRPUDeviceCuda(const PulsedRPUDeviceCuda<T> &other)
    : PulsedRPUDeviceCudaBase<T>(other) {

  initialize();

  dev_4params_->assign(*other.dev_4params_);
  dev_decay_scale_->assign(*other.dev_decay_scale_);

  if (other.dev_diffusion_rate_ != nullptr) {
    dev_diffusion_rate_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
    dev_diffusion_rate_->assign(*other.dev_diffusion_rate_);
  }
  if (other.dev_reset_bias_ != nullptr) {
    dev_reset_bias_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
    dev_reset_bias_->assign(*other.dev_reset_bias_);
  }
  if (other.dev_persistent_weights_ != nullptr) {
    dev_persistent_weights_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_);
    dev_persistent_weights_->assign(*other.dev_persistent_weights_);
  }
  this->context_->synchronize();
};

// NOTE: copy-assignment and move operations are intentionally not implemented
// for this class (earlier commented-out drafts removed for clarity); copy
// construction above is the only supported duplication path.

// Copies the parameters of a CPU-side PulsedRPUDevice into the (transposed,
// column-major as required by cuBLAS) device layouts:
//   dev_4params_ per weight: [min_bound, scale_down, max_bound, scale_up]
// Diffusion-rate, reset-bias and persistent-weight buffers are allocated only
// if any value is non-trivial / the parameters request them.
template <typename T>
void PulsedRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {

  // FIX: dynamic_cast to a *reference* throws std::bad_cast on a type
  // mismatch, so the old `if (&rpu_device == nullptr)` check was dead code
  // (and comparing a reference's address to nullptr is UB). Cast to a
  // pointer so the check actually fires.
  const auto *rpu_device_p = dynamic_cast<const PulsedRPUDevice<T> *>(&rpu_device_in);
  if (rpu_device_p == nullptr) {
    RPU_FATAL("populateFrom expects PulsedRPUDevice.");
  }
  const auto &rpu_device = *rpu_device_p;

  int x_size = rpu_device.getXSize();
  int d_size = rpu_device.getDSize();
  int size = x_size * d_size;

  initialize();
  PulsedRPUDeviceCudaBase<T>::populateFrom(rpu_device_in);

  // Host-side staging buffers for the transposed copies.
  float *tmp = new float[4 * size];
  T *tmp_ds = new T[size];
  T *tmp_df = new T[size];
  T *tmp_rb = new T[size];
  T *tmp_pw = new T[size];

  T *mn = rpu_device.getMinBound()[0];
  T *mx = rpu_device.getMaxBound()[0];
  T *su = rpu_device.getScaleUp()[0];
  T *sd = rpu_device.getScaleDown()[0];
  T *ds = rpu_device.getDecayScale()[0];
  T *df = rpu_device.getDiffusionRate()[0];
  T *rb = rpu_device.getResetBias()[0];
  T *pw = rpu_device.getPersistentWeights()[0];

  bool with_diffusion = false;
  bool with_reset_bias = false;

  for (int i = 0; i < d_size; ++i) {
    for (int j = 0; j < x_size; ++j) {
      int l_t = j * (d_size) + i;      // transposed (column-major) index
      int l = i * (x_size) + j;        // row-major CPU index
      // 4-param pack is stored interleaved per weight, transposed.
      int k = j * (d_size * 4) + 4 * i;
      tmp[k] = mn[l];
      tmp[k + 1] = sd[l];
      tmp[k + 2] = mx[l];
      tmp[k + 3] = su[l];

      tmp_ds[l_t] = ds[l];
      tmp_df[l_t] = df[l];
      tmp_rb[l_t] = rb[l];
      tmp_pw[l_t] = pw[l];

      if (df[l] != 0.0) {
        with_diffusion = true;
      }
      if (rb[l] != 0.0) {
        with_reset_bias = true;
      }
    }
  }

  dev_4params_->assign(tmp);
  dev_decay_scale_->assign(tmp_ds);

  // Optional parameters, allocated on the fly only when needed.
  if (with_diffusion) {
    dev_diffusion_rate_ = RPU::make_unique<CudaArray<T>>(this->context_, size);
    dev_diffusion_rate_->assign(tmp_df);
  }
  if (with_reset_bias) {
    dev_reset_bias_ = RPU::make_unique<CudaArray<T>>(this->context_, size);
    dev_reset_bias_->assign(tmp_rb);
  }
  if (getPar().usesPersistentWeight()) {
    dev_persistent_weights_ = RPU::make_unique<CudaArray<T>>(this->context_, size);
    dev_persistent_weights_->assign(tmp_pw);
  }

  this->context_->synchronize();

  delete[] tmp_ds;
  delete[] tmp_df;
  delete[] tmp_rb;
  delete[] tmp_pw;
  delete[] tmp;
}

// Applies a precomputed weight delta with saturation at the per-weight
// bounds. Not available with persistent weights (write noise).
template <typename T>
void PulsedRPUDeviceCuda<T>::applyWeightUpdate(T *weights, T *dw_and_current_weight_out) {
  if (getPar().usesPersistentWeight()) {
    RPU_FATAL("ApplyWeightUpdate is not supported with write_noise_std>0!");
  }
  RPU::math::elemaddcopysat<T>(
      this->context_, weights, dw_and_current_weight_out, this->size_,
      dev_4params_->getDataConst());
}

// Decays weights towards the reset bias with an extra scalar alpha; the last
// d_size_ entries (the bias row in the transposed layout) are skipped when
// bias_no_decay is set.
template <typename T>
void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, T alpha, bool bias_no_decay) {

  T *w = getPar().usesPersistentWeight() ? dev_persistent_weights_->getData() : weights;

  RPU::math::elemscalealpha<T>(
      this->context_, w,
      bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
      dev_decay_scale_->getData(), dev_4params_->getData(), alpha,
      dev_reset_bias_ != nullptr ? dev_reset_bias_->getData() : nullptr);

  applyUpdateWriteNoise(weights);
}

// Same as above without the alpha scaling.
template <typename T>
void PulsedRPUDeviceCuda<T>::decayWeights(T *weights, bool bias_no_decay) {

  const auto &par = getPar();
  T *w = par.usesPersistentWeight() ? dev_persistent_weights_->getData() : weights;

  RPU::math::elemscale<T>(
      this->context_, w,
      bias_no_decay ? MAX(this->size_ - this->d_size_, 0) : this->size_,
      dev_decay_scale_->getData(), dev_4params_->getData(),
      dev_reset_bias_ != nullptr ? dev_reset_bias_->getData() : nullptr);

  applyUpdateWriteNoise(weights);
}

// Delegates drift to the base class, then re-saturates at the bounds.
template <typename T>
void PulsedRPUDeviceCuda<T>::driftWeights(T *weights, T time_since_last_call) {

  T *w = getPar().usesPersistentWeight() ? dev_persistent_weights_->getData() : weights;

  PulsedRPUDeviceCudaBase<T>::driftWeights(w, time_since_last_call);
  this->wdrifter_cuda_->saturate(w, dev_4params_->getData());

  applyUpdateWriteNoise(weights);
}

// Adds per-weight Gaussian diffusion (no-op if no diffusion rates were set).
// The normal random buffer is refilled asynchronously on the rnd stream for
// the next call.
template <typename T> void PulsedRPUDeviceCuda<T>::diffuseWeights(T *weights) {

  if (dev_diffusion_rate_ == nullptr) {
    return; // no diffusion
  }

  T *w = getPar().usesPersistentWeight() ? dev_persistent_weights_->getData() : weights;

  if (this->dev_diffusion_nrnd_ == nullptr) {
    this->initDiffusionRnd();
    this->rnd_context_->randNormal(
        this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
  }
  this->rnd_context_->synchronize();

  RPU::math::elemasb02<T>(
      this->context_, w, this->size_, this->dev_diffusion_nrnd_->getData(),
      dev_diffusion_rate_->getData(), dev_4params_->getData());

  this->rnd_context_->recordWaitEvent(this->context_->getStream());
  this->rnd_context_->randNormal(
      this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());

  // Note: write noise will use the same rand to save memory. If diffusion +
  // write noise is often needed one might want to add an extra variable for
  // the random numbers.
  applyUpdateWriteNoise(weights);
}

// Saturates weights at the per-weight bounds and optionally clips at +-clip
// (skipped when clip is negative).
template <typename T> void PulsedRPUDeviceCuda<T>::clipWeights(T *weights, T clip) {

  T *w = getPar().usesPersistentWeight() ? dev_persistent_weights_->getData() : weights;

  RPU::math::elemsat<T>(this->context_, w, this->size_, dev_4params_->getData());
  if (clip >= 0) {
    RPU::math::aclip<T>(this->context_, w, this->size_, clip);
  }
  applyUpdateWriteNoise(weights);
}

// Lazily allocates the reset random/flag buffers (rounded up to a multiple
// of 32 for the RNG).
template <typename T> void PulsedRPUDeviceCuda<T>::initResetRnd() {

  if (this->rnd_context_ == nullptr) {
    this->initRndContext();
  }
  dev_reset_nrnd_ = std::unique_ptr<CudaArray<float>>(
      new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32));
  dev_reset_flag_ = std::unique_ptr<CudaArray<float>>(
      new CudaArray<float>(&*this->rnd_context_, (this->size_ + 31) / 32 * 32));
  dev_reset_flag_->setConst(0);
  this->rnd_context_->synchronize();
}

// Writes persistent weights + write noise into the visible weight buffer.
// No-op unless persistent weights are in use. Re-uses the diffusion random
// buffer to save memory.
template <typename T> void PulsedRPUDeviceCuda<T>::applyUpdateWriteNoise(T *dev_weights) {

  const auto &par = getPar();

  if (!par.usesPersistentWeight()) {
    return;
  }

  // re-uses the diffusion rnd
  if (this->dev_diffusion_nrnd_ == nullptr) {
    this->initDiffusionRnd();
    this->rnd_context_->randNormal(
        this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
  }
  this->rnd_context_->synchronize();

  RPU::math::elemweightedsum<T>(
      this->context_, dev_weights, this->size_, dev_persistent_weights_->getData(), (T)1.0,
      this->dev_diffusion_nrnd_->getData(), par.write_noise_std);

  this->rnd_context_->recordWaitEvent(this->context_->getStream());
  this->rnd_context_->randNormal(
      this->dev_diffusion_nrnd_->getData(), this->dev_diffusion_nrnd_->getSize());
}

// Resets the masked weights to (reset bias +) noise, saturated at the bounds.
// Not available with persistent weights.
template <typename T>
void PulsedRPUDeviceCuda<T>::resetAt(T *dev_weights, const char *dev_non_zero_msk) {

  const auto &par = getPar();

  if (par.usesPersistentWeight()) {
    RPU_FATAL("ResetAt is not supported with write_noise_std>0!");
  }

  RPU::math::elemresetsatmsk<T>(
      this->context_, dev_weights, this->size_, dev_non_zero_msk,
      dev_reset_bias_ == nullptr ? nullptr : dev_reset_bias_->getDataConst(), par.reset_std,
      dev_4params_->getData());
}

// Resets n_cols columns starting at start_col (col-major layout, so columns
// are contiguous) to the reset bias plus Gaussian noise, each weight with
// probability reset_prob. Wraps around the end of the matrix in two passes
// if needed. No-op when no reset bias was configured.
template <typename T>
void PulsedRPUDeviceCuda<T>::resetCols(T *weights, int start_col, int n_cols, T reset_prob) {
  // col-major in CUDA.
  if (dev_reset_bias_ == nullptr) {
    return; // no reset
  }
  if (getPar().usesPersistentWeight()) {
    RPU_FATAL("ResetCols is not supported with write_noise_std>0!");
  }

  if (dev_reset_nrnd_ == nullptr) {
    initResetRnd();
  }
  int n = n_cols * this->d_size_;
  int offset = start_col * this->d_size_;
  this->rnd_context_->randNormal(
      dev_reset_nrnd_->getData(), n_cols * this->d_size_, 0.0, getPar().reset_std);
  if (reset_prob < 1) {
    this->rnd_context_->randUniform(dev_reset_flag_->getData(), n_cols * this->d_size_);
  }
  this->context_->recordWaitEvent(this->rnd_context_->getStream());

  if (n >= this->size_) {
    // reset whole matrix
    RPU::math::elemresetsat<T>(
        this->context_, weights, this->size_, dev_reset_bias_->getDataConst(),
        dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
        dev_4params_->getData());

  } else if (offset + n <= this->size_) {
    // one pass enough
    RPU::math::elemresetsat<T>(
        this->context_, weights + offset, n, dev_reset_bias_->getDataConst() + offset,
        dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
        dev_4params_->getData() + 4 * offset);
  } else {
    // two passes (wrap-around)
    int m = this->size_ - offset;

    RPU::math::elemresetsat<T>(
        this->context_, weights + offset, m, dev_reset_bias_->getDataConst() + offset,
        dev_reset_nrnd_->getDataConst(), dev_reset_flag_->getDataConst(), reset_prob,
        dev_4params_->getData() + 4 * offset);

    RPU::math::elemresetsat<T>(
        this->context_, weights, n - m, dev_reset_bias_->getDataConst(),
        dev_reset_nrnd_->getDataConst() + m, dev_reset_flag_->getDataConst() + m, reset_prob,
        dev_4params_->getData());
  }
}

// Dispatches the chosen pulsed-update kernel parametrization.
template <typename T>
void PulsedRPUDeviceCuda<T>::runUpdateKernel(
    pwukp_t<T> kpars,
    CudaContext *c,
    T *dev_weights,
    int m_batch,
    const BitLineMaker<T> *blm,
    const PulsedUpdateMetaParameter<T> &up,
    const T lr,
    curandState_t *dev_states,
    int one_sided,
    uint32_t *x_counts_chunk,
    uint32_t *d_counts_chunk) {

  kpars->run(
      c->getStream(), dev_weights, m_batch, blm, this, up, dev_states, one_sided, x_counts_chunk,
      d_counts_chunk);
}

template class PulsedRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class PulsedRPUDeviceCuda<double>;
#endif

} // namespace RPU
the_stack
// CUDA implementations of map-projection transforms (LLH <-> projected
// coordinate systems).  Host-side *_h helpers launch single-thread kernels
// purely to exercise the device code paths during testing.
using isce3::core::Vec3;
namespace isce3 { namespace cuda { namespace core {

//Helper for the host side function - used only for testing.
// Builds the projection on-device from the EPSG code, runs forward(),
// records the status flag, then destroys the projection again.
__global__ void forward_g(int code, ProjectionBase** base, const double *inpts,
                          double *outpts, int *flags)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        (*base) = createProj(code);
        flags[0] = (*base)->forward(*(Vec3*) inpts, *(Vec3*) outpts);
        delete *base;
    }
}

//Helper for the host side function - used only for testing.
// Same as forward_g but applies the inverse transform.
__global__ void inverse_g(int code, ProjectionBase **base, const double *inpts,
                          double *outpts, int *flags)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        (*base) = createProj(code);
        flags[0] = (*base)->inverse(*(Vec3*) inpts, *(Vec3*) outpts);
        delete *base;
    }
}

__host__ int ProjectionBase::forward_h(const Vec3& llh, Vec3& xyz) const
{
    /*
     * This is to transform from LLH to requested projection system on the host.
     * Returns the status flag produced by the device-side forward() call.
     */
    double *llh_d, *xyz_d;
    int *flag_d;
    ProjectionBase **base_d;
    // Device scratch: status flag, input/output triplets, projection pointer.
    checkCudaErrors( cudaMalloc((int**)&flag_d, 1*sizeof(int)));
    checkCudaErrors( cudaMalloc((double**)&llh_d,3*sizeof(double)));
    checkCudaErrors( cudaMalloc((double**)&xyz_d,3*sizeof(double)));
    // NOTE(review): allocates sizeof(ProjectionBase**); the element stored is a
    // ProjectionBase*, the sizes coincide on all supported platforms.
    checkCudaErrors( cudaMalloc(&base_d, sizeof(ProjectionBase**)));
    checkCudaErrors( cudaMemcpy(llh_d, llh.data(), 3*sizeof(double), cudaMemcpyHostToDevice));

    //Call the global function with a single thread
    forward_g<<<1,1>>>(_epsgcode, base_d, llh_d, xyz_d, flag_d);
    checkCudaErrors(cudaDeviceSynchronize());

    checkCudaErrors( cudaMemcpy(xyz.data(), xyz_d, 3*sizeof(double), cudaMemcpyDeviceToHost));
    int status;
    checkCudaErrors( cudaMemcpy(&status, flag_d, sizeof(int), cudaMemcpyDeviceToHost));

    //Clean up
    checkCudaErrors( cudaFree(llh_d));
    checkCudaErrors( cudaFree(xyz_d));
    checkCudaErrors( cudaFree(flag_d));
    checkCudaErrors( cudaFree(base_d));
    return status;
}

__host__ int ProjectionBase::inverse_h(const Vec3& xyz, Vec3& llh) const
{
    /*
     * This is to transform from requested projection system to LLH on the host.
     * Mirrors forward_h; returns the device-side inverse() status flag.
     */
    double *llh_d, *xyz_d;
    int *flag_d;
    ProjectionBase **base_d;
    checkCudaErrors( cudaMalloc((int**)&flag_d, sizeof(int)));
    checkCudaErrors( cudaMalloc((double**)&llh_d,3*sizeof(double)));
    checkCudaErrors( cudaMalloc((double**)&xyz_d,3*sizeof(double)));
    checkCudaErrors( cudaMalloc(&base_d, sizeof(ProjectionBase**)));
    checkCudaErrors( cudaMemcpy(xyz_d, xyz.data(), 3*sizeof(double), cudaMemcpyHostToDevice));

    //Call the global function with a single thread
    // (the *1 on the EPSG code is a no-op)
    inverse_g<<<1,1>>>(_epsgcode*1, base_d, xyz_d, llh_d, flag_d);
    checkCudaErrors( cudaDeviceSynchronize());

    checkCudaErrors( cudaMemcpy(llh.data(), llh_d, 3*sizeof(double), cudaMemcpyDeviceToHost));
    int status;
    checkCudaErrors( cudaMemcpy(&status, flag_d, sizeof(int), cudaMemcpyDeviceToHost));

    //Clean up
    checkCudaErrors( cudaFree(llh_d));
    checkCudaErrors( cudaFree(xyz_d));
    checkCudaErrors( cudaFree(flag_d));
    checkCudaErrors( cudaFree(base_d));
    return status;
}

CUDA_DEV int LonLat::forward(const Vec3& in, Vec3& out) const
{
    /*
     * Transforms Lon/Lat from radians to degrees. Height is passed through.
     */
    out[0] = in[0] * 180.0/M_PI;
    out[1] = in[1] * 180.0/M_PI;
    out[2] = in[2];
    return 0;
}

CUDA_DEV int LonLat::inverse(const Vec3& in, Vec3& out) const
{
    /*
     * Transforms Lon/Lat from degrees to radians. Height is passed through.
     */
    out[0] = in[0] * M_PI/180.0;
    out[1] = in[1] * M_PI/180.0;
    out[2] = in[2];
    return 0;
}

CUDA_DEV int Geocent::forward(const Vec3& in, Vec3& out) const
{
    /*
     * Same as Ellipsoid::lonLatToXyz.
     */
    ellipse.lonLatToXyz(in, out);
    return 0;
}

CUDA_DEV int Geocent::inverse(const Vec3& in, Vec3& out) const
{
    /*
     * Same as Ellipsoid::xyzToLonLat
     */
    ellipse.xyzToLonLat(in, out);
    return 0;
}

CUDA_HOSTDEV double clens(const double* a, int size, double real)
{
    /*
     * Local function - Compute the real clenshaw summation. Also computes Gaussian latitude for
     * some B as clens(a, len(a), 2*B) + B.
     *
     * NOTE: The implementation here has been modified to allow for encapsulating the gatg()
     * implementation, as well as to make the implementation details much clearer/cleaner.
     */
    const double *p;
    double hr, hr1, hr2;
    // Walk the coefficient array back-to-front; "a - p" terminates when p
    // reaches the start of the array.
    for (p = a + size, hr2 = 0., hr1 = *(--p), hr=0.; a - p; hr2 = hr1, hr1 = hr) {
        hr = -hr2 + (2. * hr1 * cos(real)) + *(--p);
    }
    return sin(real) * hr;
}

CUDA_HOSTDEV double clenS(const double *a, int size, double real, double imag, double &R, double &I)
{
    /*
     * Local function - Compute the complex clenshaw summation.
     *
     * NOTE: The implementation here has been modified to match the modified implementation of the
     * real clenshaw summation above. As expected with complex->real behavior, if imag == 0,
     * then I == 0 on return regardless of other inputs (so maybe we just implement
     * clenS(a,len(a),real,0,_,_) for clens(a,len(a),real) to simplify the code space?)
     */
    const double *p;
    double hr, hr1, hr2, hi, hi1, hi2;
    // NOTE(review): hi1 is initialized twice in this init list (harmless, both 0.).
    for (p = a + size, hr2 = 0., hi2 = 0., hi1 = 0., hr1 = *(--p), hi1 = 0., hr = 0., hi = 0.;
         a - p;
         hr2 = hr1, hi2 = hi1, hr1 = hr, hi1 = hi) {
        hr = -hr2 + (2. * hr1 * cos(real) * cosh(imag)) - (-2. * hi1 * sin(real) * sinh(imag)) + *(--p);
        hi = -hi2 + (-2. * hr1 * sin(real) * sinh(imag)) + (2. * hi1 * cos(real) * cosh(imag));
    }
    // Bad practice - Should *either* modify R in-place *or* return R, not both. I is modified, but
    // not returned. Since R and I are tied, we should either return a pair<,>(,) or modify
    // in-place, not mix the strategies
    R = (sin(real) * cosh(imag) * hr) - (cos(real) * sinh(imag) * hi);
    I = (sin(real) * cosh(imag) * hi) + (cos(real) * sinh(imag) * hr);
    return R;
}

CUDA_HOSTDEV UTM::UTM(int code) : ProjectionBase(code)
{
    /*
     * Value constructor, delegates to base constructor before continuing with UTM-specific setup
     * code (previously contained in a private _setup() method but moved given that _setup() was
     * not supposed to be callable after construction).
     */
    // EPSG 326xx = northern-hemisphere zones, 327xx = southern-hemisphere zones.
    if ((_epsgcode > 32600) && (_epsgcode <= 32660)) {
        zone = _epsgcode - 32600;
        isnorth = true;
    } else if ((_epsgcode > 32700) && (_epsgcode <= 32760)) {
        zone = _epsgcode - 32700;
        isnorth = false;
    } else {
        //Error handling delegated to CPU side
    }

    // Central meridian of the zone (radians).
    lon0 = ((zone - 0.5) * (M_PI / 30.)) - M_PI;

    // Ellipsoid flattening
    double f = ellipse.e2() / (1. + sqrt(1 - ellipse.e2()));
    // Third flattening
    double n = f / (2. - f);

    // Gaussian -> Geodetic == cgb
    // Geodetic -> Gaussian == cbg
    // (series coefficients in powers of the third flattening n)
    cgb[0] = n * (2 + n * ((-2./3.) + n * (-2 + n * ((116./45.) + n * ((26./45.) + n * (-2854./675.))))));
    cbg[0] = n * (-2 + n * ((2./3.) + n * ((4./3.) + n * ((-82./45.) + n * ((32./45.) + n * (4642./4725.))))));
    cgb[1] = pow(n,2) * ((7./3.) + n * ((-8./5.) + n * ((-227./45.) + n * ((2704./315.) + n * (2323./945.)))));
    cbg[1] = pow(n,2) * ((5./3.) + n * ((-16./15.) + n * ((-13./9.) + n * ((904./315.) + n * (-1522./945.)))));
    cgb[2] = pow(n,3) * ((56./15.) + n * ((-136./35.) + n * ((-1262./105.) + n * (73814./2835.))));
    cbg[2] = pow(n,3) * ((-26./15.) + n * ((34./21.) + n * ((8./5.) + n * (-12686./2835.))));
    cgb[3] = pow(n,4) * ((4279./630.) + n * ((-332./35.) + n * (-399572/14175.)));
    cbg[3] = pow(n,4) * ((1237./630.) + n * ((-12./5.) + n * (-24832./14175.)));
    cgb[4] = pow(n,5) * ((4174./315.) + n * (-144838./6237.));
    cbg[4] = pow(n,5) * ((-734./315.) + n * (109598./31185.));
    cgb[5] = pow(n,6) * (601676./22275.);
    cbg[5] = pow(n,6) * (444337./155925.);

    // We have fixed k0 = 0.9996 here. This is standard for WGS84 zones. Proj4 allows this to be
    // changed for custom definitions. We plan to support standard definitions only.
    Qn = (0.9996 / (1. + n)) * (1. + n * n * ((1./4.) + n * n * ((1./64.) + ((n * n) / 256.))));

    // Elliptical N,E -> Spherical N,E == utg
    // Spherical N,E -> Elliptical N,E == gtu
    utg[0] = n * (-.5 + n * ((2./3.) + n * ((-37./96.) + n * ((1./360.) + n * ((81./512.) + n * (-96199./604800.))))));
    gtu[0] = n * (.5 + n * ((-2./3.) + n * ((5./16.) + n * ((41./180.) + n * ((-127./288.) + n * (7891./37800.))))));
    utg[1] = pow(n,2) * ((-1./48.) + n * ((-1./15.) + n * ((437./1440.) + n * ((-46./105.) + n * (1118711./3870720.)))));
    gtu[1] = pow(n,2) * ((13./48.) + n * ((-3./5.) + n * ((557./1440.) + n * ((281./630.) + n * (-1983433./1935360.)))));
    utg[2] = pow(n,3) * ((-17./480.) + n * ((37./840.) + n * ((209./4480.) + n * (-5569./90720.))));
    gtu[2] = pow(n,3) * ((61./240.) + n * ((-103./140.) + n * ((15061./26880.) + n * (167603./181440.))));
    utg[3] = pow(n,4) * ((-4397./161280.) + n * ((11./504.) + n * (830251./7257600.)));
    gtu[3] = pow(n,4) * ((49561./161280.) + n * ((-179./168.) + n * (6601661./7257600.)));
    utg[4] = pow(n,5) * ((-4583./161280.) + n * (108847./3991680.));
    gtu[4] = pow(n,5) * ((34729./80640.) + n * (-3418889./1995840.));
    utg[5] = pow(n,6) * (-20648693./638668800.);
    gtu[5] = pow(n,6) * (212378941./319334400.);

    // Gaussian latitude of origin latitude
    // JC - clens(_,_,0.) is always 0, should we hardcode/eliminate this?
    double Z = clens(cbg, 6, 0.);
    Zb = -Qn * (Z + clens(gtu, 6, 2*Z));
}

CUDA_DEV int UTM::forward(const Vec3& llh, Vec3& utm) const
{
    /*
     * Transform from LLH to UTM. Returns 0 on success, 1 if the point falls
     * outside the usable easting extent of the zone.
     */
    // Elliptical Lat, Lon -> Gaussian Lat, Lon
    double gauss = clens(cbg, 6, 2.*llh[1]) + llh[1];
    // Adjust longitude for zone offset
    double lam = llh[0] - lon0;

    // Account for longitude and get Spherical N,E
    double Cn = atan2(sin(gauss), cos(lam)*cos(gauss));
    double Ce = atan2(sin(lam)*cos(gauss), hypot(sin(gauss), cos(gauss)*cos(lam)));

    //Spherical N,E to Elliptical N,E
    Ce = asinh(tan(Ce));
    double dCn, dCe;
    Cn += clenS(gtu, 6, 2*Cn, 2*Ce, dCn, dCe);
    Ce += dCe;

    if (fabs(Ce) <= 2.623395162778) {
        utm[0] = (Qn * Ce * ellipse.a()) + 500000.;
        utm[1] = (((Qn * Cn) + Zb) * ellipse.a()) + (isnorth ? 0. : 10000000.);
        // UTM is lateral projection only, height is pass through.
        utm[2] = llh[2];
        return 0;
    } else {
        return 1;
    }
}

CUDA_DEV int UTM::inverse(const Vec3& utm, Vec3& llh) const
{
    /*
     * Transform from UTM to LLH. Returns 0 on success, 1 if the easting is
     * outside the usable extent of the zone.
     */
    // Strip false northing/easting and normalize by the semi-major axis.
    double Cn = (utm[1] - (isnorth ? 0. : 10000000.)) / ellipse.a();
    double Ce = (utm[0] - 500000.) / ellipse.a();

    //Normalize N,E to Spherical N,E
    Cn = (Cn - Zb) / Qn;
    Ce /= Qn;

    if (fabs(Ce) <= 2.623395162778) {
        //N,E to Spherical Lat, Lon
        double dCn, dCe;
        Cn += clenS(utg, 6, 2*Cn, 2*Ce, dCn, dCe);
        Ce = atan(sinh(Ce + dCe));

        //Spherical Lat, Lon to Gaussian Lat, Lon
        double sinCe = sin(Ce);
        double cosCe = cos(Ce);
        Ce = atan2(sinCe, cosCe*cos(Cn));
        Cn = atan2(sin(Cn)*cosCe, hypot(sinCe, cosCe*cos(Cn)));

        //Gaussian Lat, Lon to Elliptical Lat, Lon
        llh[0] = Ce + lon0;
        llh[1] = clens(cgb, 6, 2*Cn) + Cn;

        //UTM is a lateral projection only. Height is pass through.
        llh[2] = utm[2];
        return 0;
    } else {
        return 1;
    }
}

CUDA_HOSTDEV double pj_tsfn(double phi, double sinphi, double e)
{
    /*
     * Local function - Determine small t from PROJ.4.
     */
    sinphi *= e;
    return tan(.5 * ((.5*M_PI) - phi)) / pow((1. - sinphi) / (1. + sinphi), .5*e);
}

CUDA_HOSTDEV PolarStereo::PolarStereo(int code) : ProjectionBase(code)
{
    /*
     * Set up various parameters for polar stereographic projection. Currently only EPSG:3031
     * (Antarctic) and EPSG:3413 (Greenland) are supported.
     */
    if (_epsgcode == 3031) {
        isnorth = false;
        // Only need absolute value
        lat_ts = (71. * M_PI) / 180.;
        lon0 = 0.;
    } else if (_epsgcode == 3413) {
        isnorth = true;
        lat_ts = 70. * (M_PI / 180.);
        lon0 = -45. * (M_PI / 180.);
    } else {
        //Need to figure out a way to throw error on device
        //Currently, delegated to CPU side
    }
    e = sqrt(ellipse.e2());
    // Scale constant at the latitude of true scale.
    akm1 = cos(lat_ts) / pj_tsfn(lat_ts, sin(lat_ts), e);
    akm1 *= ellipse.a() / sqrt(1. - (pow(e,2) * pow(sin(lat_ts),2)));
}

CUDA_DEV int PolarStereo::forward(const Vec3& llh, Vec3& out) const{
    /**
     * Host / Device forward projection function.
     */
    double lam = llh[0] - lon0;
    // Mirror latitude for the southern hemisphere so the math is always "north".
    double phi = llh[1] * (isnorth ? 1. : -1.);
    double temp = akm1 * pj_tsfn(phi, sin(phi), e);

    out[0] = temp * sin(lam);
    out[1] = -temp * cos(lam) * (isnorth ? 1. : -1.);
    //Height is just pass through
    out[2] = llh[2];
    return 0;
}

CUDA_DEV int PolarStereo::inverse(const Vec3& ups, Vec3& llh) const {
    /**
     * Host / Device inverse projection function. Iteratively solves for the
     * latitude; returns 0 on convergence, 1 if 8 iterations do not converge.
     */
    double tp = -hypot(ups[0], ups[1])/akm1;
    double fact = (isnorth)?1:-1;
    double phi_l = (.5*M_PI) - (2. * atan(tp));

    double sinphi;
    double phi = 0.;
    for(int i=8; i--; phi_l = phi) {
        sinphi = e * sin(phi_l);
        phi = 2. * atan(tp * pow((1. + sinphi) / (1. - sinphi), -0.5*e)) +0.5 * M_PI;
        if (fabs(phi_l - phi) < 1.e-10) {
            llh[0] = ((ups[0] == 0.) && (ups[1] == 0.)) ? 0. : atan2(ups[0], -fact*ups[1]) + lon0;
            llh[1] = phi*fact;
            llh[2] = ups[2];
            return 0;
        }
    }
    return 1;
}

CUDA_HOSTDEV double pj_qsfn(double sinphi, double e, double one_es)
{
    /*
     * Local function - small q from PROJ.4 (used by the authalic-latitude
     * series of the equal-area projection) — TODO confirm naming.
     */
    double con = e * sinphi;
    return one_es * ((sinphi / (1. - pow(con,2))) - ((.5 / e) * log((1. - con) / (1. + con))));
}

CUDA_HOSTDEV CEA::CEA() : ProjectionBase(6933)
{
    /*
     * Set up parameters for equal area projection (EPSG:6933, EASE2 grid).
     */
    lat_ts = M_PI / 6.;
    k0 = cos(lat_ts) / sqrt(1. - (ellipse.e2() * pow(sin(lat_ts),2)));
    e = sqrt(ellipse.e2());
    one_es = 1. - ellipse.e2();
    // Authalic-latitude series coefficients in powers of e^2.
    apa[0] = ellipse.e2() * ((1./3.) + (ellipse.e2() * ((31./180.) + (ellipse.e2() * (517./5040.)))));
    apa[1] = pow(ellipse.e2(),2) * ((23./360.) + (ellipse.e2() * (251./3780.)));
    apa[2] = pow(ellipse.e2(),3) * (761./45360.);
    qp = pj_qsfn(1., e, one_es);
}

CUDA_DEV int CEA::forward(const Vec3& llh, Vec3& enu) const
{
    /*
     * Transform from LLH to CEA.
     */
    enu[0] = k0 * llh[0] * ellipse.a();
    enu[1] = (.5 * ellipse.a() * pj_qsfn(sin(llh[1]), e, one_es)) / k0;
    enu[2] = llh[2];
    return 0;
}

CUDA_DEV int CEA::inverse(const Vec3& enu, Vec3& llh) const
{
    /*
     * Transform from CEA to LLH.
     */
    llh[0] = enu[0] / (k0 * ellipse.a());
    double beta = asin((2. * enu[1] * k0) / (ellipse.a() * qp));
    llh[1] = beta + (apa[0] * sin(2. * beta)) + (apa[1] * sin(4. * beta)) + (apa[2] * sin(6. * beta));
    llh[2] = enu[2];
    return 0;
}

// Factory: build the projection object matching an EPSG code, or NULL for an
// unsupported code (error reporting is delegated to CPU-side callers).
CUDA_HOSTDEV ProjectionBase* createProj(int epsgcode)
{
    //Check for Lat/Lon
    if (epsgcode == 4326) {
        return new LonLat;
    }
    //Check for geocent
    else if (epsgcode == 4978) {
        return new Geocent;
    }
    //Check for UTM
    else if (epsgcode > 32600 && epsgcode < 32800) {
        return new UTM(epsgcode);
    }
    //Check for Polar Stereo
    else if (epsgcode == 3031 || epsgcode == 3413) {
        return new PolarStereo(epsgcode);
    }
    //EASE2 grid
    else if (epsgcode == 6933) {
        return new CEA;
    }
    else {
        //Somehow errors must be handled at this stage
        //Delegating to CPU code
        return NULL;
    }
}

// Transform a point between two projection systems via LLH.
// Returns 0 on success, -2 if the inverse step fails, 2 if the forward step fails.
CUDA_DEV int projTransform(const ProjectionBase *in, const ProjectionBase *out,
                           const Vec3& inpts, Vec3& outpts)
{
    if (in->_epsgcode == out->_epsgcode) {
        // If input/output projections are the same don't even bother processing
        for (int ii=0; ii<3;ii++)
            outpts[ii] = inpts[ii];
        return 0;
    } else {
        Vec3 temp;
        if (in->inverse(inpts, temp) != 0)
            return -2;
        if (out->forward(temp, outpts) != 0)
            return 2;
    }
    return 0;
};

// Device-only inverse dispatch for the small set of projections that can be
// constructed directly in device code (no virtual factory needed).
__device__ int projInverse(int code, const Vec3& in, Vec3& out)
{
    if (code == 4326) {
        LonLat proj;
        return proj.inverse(in, out);
    } else if (code == 3031 or code == 3413) {
        PolarStereo proj(code);
        return proj.inverse(in, out);
    } else {
        return 1; // failure
    }
}

}}}
the_stack
// sum reduction within a block
// the following implementation is compatible for sm_20 and above
// newer architectures may support faster implementations, such as warp shuffle, cooperative groups
// NOTE(review): the tid<32 tail relies on volatile warp-synchronous semantics;
// on Volta+ independent thread scheduling this classically needs __syncwarp —
// confirm the targeted architectures.
template <const int Nthreads>
__device__ float sumReduceBlock(float sum, volatile float *shmem)
{
    const int tid = threadIdx.x;
    shmem[tid] = sum;
    __syncthreads();

    // Tree reduction: halve the active thread count each step.
    if (Nthreads >=1024) { if (tid < 512) { shmem[tid] += shmem[tid + 512]; } __syncthreads(); }
    if (Nthreads >= 512) { if (tid < 256) { shmem[tid] += shmem[tid + 256]; } __syncthreads(); }
    if (Nthreads >= 256) { if (tid < 128) { shmem[tid] += shmem[tid + 128]; } __syncthreads(); }
    if (Nthreads >= 128) { if (tid < 64) { shmem[tid] += shmem[tid + 64]; } __syncthreads(); }
    // Final warp: no barriers, volatile accesses only.
    if (tid < 32)
    {
        shmem[tid] += shmem[tid + 32];
        shmem[tid] += shmem[tid + 16];
        shmem[tid] += shmem[tid + 8];
        shmem[tid] += shmem[tid + 4];
        shmem[tid] += shmem[tid + 2];
        shmem[tid] += shmem[tid + 1];
    }
    __syncthreads();
    // Every thread returns the full block sum.
    return shmem[0];
}

// cuda kernel to compute the mean value of each image (one block per image)
template<const int Nthreads>
__global__ void cuArraysMean_kernel(float *images, float *image_sum, int imageSize, float invSize, int nImages)
{
    __shared__ float shmem[Nthreads];
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (bid >= nImages) return;

    const int imageIdx = bid;
    const int imageOffset = imageIdx * imageSize;
    float *imageD = images + imageOffset;

    float sum = 0.0f;
    // perform the reduction beyond one block
    // save the results for each thread in block
    for (int i = tid; i < imageSize; i += Nthreads)
        sum += imageD[i];
    // reduction within the block
    sum = sumReduceBlock<Nthreads>(sum, shmem);
    const float mean = sum * invSize;
    if(tid ==0)
        image_sum[bid] = mean;
}

/**
 * Compute mean values for images
 * @param[in] images Input images
 * @param[out] mean Output mean values
 * @param[in] stream cudaStream
 */
void cuArraysMeanValue(cuArrays<float> *images, cuArrays<float> *mean, cudaStream_t stream)
{
    const dim3 grid(images->count, 1, 1);
    const int imageSize = images->width*images->height;
    const float invSize = 1.0f/imageSize;

    cuArraysMean_kernel<NTHREADS> <<<grid,NTHREADS,0,stream>>>(images->devData, mean->devData, imageSize, invSize, images->count);
    getLastCudaError("cuArraysMeanValue kernel error\n");
}

// cuda kernel to compute and subtracts mean value from the images
template<const int Nthreads>
__global__ void cuArraysSubtractMean_kernel(float *images, int imageSize, float invSize, int nImages)
{
    __shared__ float shmem[Nthreads];
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (bid >= nImages) return;

    const int imageIdx = bid;
    const int imageOffset = imageIdx * imageSize;
    float *imageD = images + imageOffset;

    // compute the sum
    float sum = 0.0f;
    for (int i = tid; i < imageSize; i += Nthreads)
        sum += imageD[i];
    sum = sumReduceBlock<Nthreads>(sum, shmem);
    // compute the mean
    const float mean = sum * invSize;
    // subtract the mean from each pixel
    for (int i = tid; i < imageSize; i += Nthreads)
        imageD[i] -= mean;
}

/**
 * Compute and subtract mean values from images
 * @param[inout] images Input/Output images
 * @param[in] stream cudaStream
 */
void cuArraysSubtractMean(cuArrays<float> *images, cudaStream_t stream)
{
    const dim3 grid(images->count, 1, 1);
    const int imageSize = images->width*images->height;
    const float invSize = 1.0f/imageSize;

    cuArraysSubtractMean_kernel<NTHREADS> <<<grid,NTHREADS,0,stream>>>(images->devData, imageSize, invSize, images->count);
    getLastCudaError("cuArraysSubtractMean kernel error\n");
}

// cuda kernel to compute summation on extracted correlation surface (Minyan)
// NOTE(review): the valid-pixel count is reduced through the float reducer, so
// counts round-trip through float (exact only below 2^24) — confirm acceptable.
template<const int Nthreads>
__global__ void cuArraysSumCorr_kernel(float *images, int *imagesValid, float *imagesSum,
                                       int *imagesValidCount, int imageSize, int nImages)
{
    __shared__ float shmem[Nthreads];
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (bid >= nImages) return;

    const int imageIdx = bid;
    const int imageOffset = imageIdx * imageSize;
    float* imageD = images + imageOffset;
    int* imageValidD = imagesValid + imageOffset;

    float sum = 0.0f;
    int count = 0;
    // sum of squared pixel values over valid pixels, and the valid-pixel count
    for (int i = tid; i < imageSize; i += Nthreads)
    {
        sum += imageD[i] * imageD[i];
        count += imageValidD[i];
    }
    sum = sumReduceBlock<Nthreads>(sum, shmem);
    count = sumReduceBlock<Nthreads>(count, shmem);
    if(tid ==0) {
        imagesSum[bid] = sum;
        imagesValidCount[bid] = count;
    }
}

/**
 * Compute the variance of images (for SNR)
 * @param[in] images Input images
 * @param[in] imagesValid validity flags for each pixel
 * @param[out] imagesSum variance
 * @param[out] imagesValidCount count of total valid pixels
 * @param[in] stream cudaStream
 */
void cuArraysSumCorr(cuArrays<float> *images, cuArrays<int> *imagesValid, cuArrays<float> *imagesSum,
                     cuArrays<int> *imagesValidCount, cudaStream_t stream)
{
    const dim3 grid(images->count, 1, 1);
    const int imageSize = images->width*images->height;

    cuArraysSumCorr_kernel<NTHREADS> <<<grid,NTHREADS,0,stream>>>(images->devData, imagesValid->devData,
        imagesSum->devData, imagesValidCount->devData, imageSize, images->count);
    getLastCudaError("cuArraysSumValueCorr kernel error\n");
}

// intra-block inclusive prefix sum (Hillis-Steele style; result left in shmem)
template<int Nthreads2>
__device__ void inclusive_prefix_sum(float sum, volatile float *shmem)
{
    const int tid = threadIdx.x;
    shmem[tid] = sum;
    __syncthreads();

    #pragma unroll
    for (int i = 0; i < Nthreads2; i++) {
        const int offset = 1 << i;
        if (tid >= offset)
            sum += shmem[tid - offset];
        __syncthreads();
        shmem[tid] = sum;
        __syncthreads();
    }
}

// prefix sum of pixel value and pixel value^2
// Returns the windowed sums over [tid, tid+stride) using differences of the
// inclusive prefix sums.  The +1 offsets leave a guard slot at shmem[0] so the
// tid==0 read of shMem[-1] lands on a zeroed cell (the caller pre-zeroes shmem).
template<const int Nthreads2>
__device__ float2 partialSums(const float v, volatile float* shmem, const int stride)
{
    const int tid = threadIdx.x;
    volatile float *shMem = shmem + 1;
    volatile float *shMem2 = shMem + 1 + (1 << Nthreads2);

    inclusive_prefix_sum<Nthreads2>(v, shMem);
    inclusive_prefix_sum<Nthreads2>(v*v, shMem2);

    const float Sum = shMem [tid-1 + stride] - shMem [tid-1];
    const float Sum2 = shMem2[tid-1 + stride] - shMem2[tid-1];
    return make_float2(Sum, Sum2);
}

// cuda kernel for cuCorrNormalize
// One block (blockIdx.z) per image pair; one thread per secondary-window column.
// Uses a sliding-window sum/sum^2 over rows to normalize each correlation row.
template<const int Nthreads2>
__global__ void cuCorrNormalize_kernel(
    int nImages,
    const float *templateIn, int templateNX, int templateNY, int templateSize,
    const float *imageIn, int imageNX, int imageNY, int imageSize,
    float *resultOut, int resultNX, int resultNY, int resultSize,
    float templateCoeff)
{
    const int Nthreads = 1<<Nthreads2;
    // shmem layout: [0, Nthreads) reduction scratch; the remaining 2*Nthreads
    // hold the two prefix-sum arrays used by partialSums.
    __shared__ float shmem[Nthreads*3];
    const int tid = threadIdx.x;
    const int imageIdx = blockIdx.z;

    if (imageIdx >= nImages) return;

    const int imageOffset = imageIdx * imageSize;
    const int templateOffset = imageIdx * templateSize;
    const int resultOffset = imageIdx * resultSize;

    const float * imageD = imageIn + imageOffset;
    const float *templateD = templateIn + templateOffset;
    float * resultD = resultOut + resultOffset;

    // template sum^2
    float templateSum2 = 0.0f;
    for (int i = tid; i < templateSize; i += Nthreads)
    {
        const float t = templateD[i];
        templateSum2 += t*t;
    }
    templateSum2 = sumReduceBlock<Nthreads>(templateSum2, shmem);
    __syncthreads();

    // reset shared memory value
    shmem[tid] = shmem[tid + Nthreads] = shmem[tid + 2*Nthreads] = 0.0f;
    __syncthreads();

    // perform the prefix sum and sum^2 for secondary window
    // see notes above
    float imageSum = 0.0f;
    float imageSum2 = 0.0f;
    int iaddr = 0;
    const int windowSize = templateNX*imageNY;
    // iterative till reaching the templateNX row of the secondary window
    // or the first row of correlation surface may be computed
    while (iaddr < windowSize)
    {
        // cum sum for each row with a width=templateNY
        const float2 res = partialSums<Nthreads2>(imageD[iaddr + tid], shmem, templateNY);
        // add to the total, which keeps track of the sum of area for each window
        imageSum += res.x;
        imageSum2 += res.y;
        // move to next row
        iaddr += imageNY;
    }
    // row reaches the end of first batch of windows
    // normalize the first row of the correlation surface
    if (tid < resultNY)
    {
        // normalizing factor
        const float norm2 = (imageSum2 - imageSum*imageSum*templateCoeff)*templateSum2;
        // normalize the correlation surface
        resultD[tid] *= rsqrtf(norm2 + FLT_EPSILON);
    }
    // iterative over the rest rows
    while (iaddr < imageSize)
    {
        // the prefix sum of the row removed is recomputed, to be subtracted
        const float2 res1 = partialSums<Nthreads2>(imageD[iaddr-windowSize + tid], shmem, templateNY);
        // the prefix sum of the new row, to be added
        const float2 res2 = partialSums<Nthreads2>(imageD[iaddr + tid], shmem, templateNY);
        imageSum += res2.x - res1.x;
        imageSum2 += res2.y - res1.y;
        // move to next row
        iaddr += imageNY;
        // normalize the correlation surface
        if (tid < resultNY)
        {
            const int ix = iaddr/imageNY; // get row index
            const int addr = (ix-templateNX)*resultNY; // get the correlation surface row index
            const float norm2 = (imageSum2 - imageSum*imageSum*templateCoeff)*templateSum2;
            resultD[addr + tid] *= rsqrtf(norm2 + FLT_EPSILON);
        }
    }
}

/**
 * Normalize a correlation surface
 * @param[in] templates Reference windows with mean subtracted
 * @param[in] images Secondary windows
 * @param[inout] results un-normalized correlation surface as input and normalized as output
 * @param[in] stream cudaStream
 * @warning The current implementation uses one thread for one column, therefore,
 *   the secondary window width is limited to <=1024, the max threads in a block.
 */
void cuCorrNormalize(cuArrays<float> *templates, cuArrays<float> *images, cuArrays<float> *results, cudaStream_t stream)
{
    const int nImages = images->count;
    const int imageNY = images->width;
    const dim3 grid(1, 1, nImages);
    const float invTemplateSize = 1.0f/templates->size;

    // Dispatch on the smallest power-of-two block size covering the window width.
    if (imageNY <= 64) {
        cuCorrNormalize_kernel< 6><<<grid, 64, 0, stream>>>(nImages,
            templates->devData, templates->height, templates->width, templates->size,
            images->devData, images->height, images->width, images->size,
            results->devData, results->height, results->width, results->size,
            invTemplateSize);
        getLastCudaError("cuCorrNormalize kernel error");
    }
    else if (imageNY <= 128) {
        cuCorrNormalize_kernel< 7><<<grid, 128, 0, stream>>>(nImages,
            templates->devData, templates->height, templates->width, templates->size,
            images->devData, images->height, images->width, images->size,
            results->devData, results->height, results->width, results->size,
            invTemplateSize);
        getLastCudaError("cuCorrNormalize kernel error");
    }
    else if (imageNY <= 256) {
        cuCorrNormalize_kernel< 8><<<grid, 256, 0, stream>>>(nImages,
            templates->devData, templates->height, templates->width, templates->size,
            images->devData, images->height, images->width, images->size,
            results->devData, results->height, results->width, results->size,
            invTemplateSize);
        getLastCudaError("cuCorrNormalize kernel error");
    }
    else if (imageNY <= 512) {
        cuCorrNormalize_kernel< 9><<<grid, 512, 0, stream>>>(nImages,
            templates->devData, templates->height, templates->width, templates->size,
            images->devData, images->height, images->width, images->size,
            results->devData, results->height, results->width, results->size,
            invTemplateSize);
        getLastCudaError("cuCorrNormalize kernel error");
    }
    else if (imageNY <= 1024) {
        cuCorrNormalize_kernel<10><<<grid,1024, 0, stream>>>(nImages,
            templates->devData, templates->height, templates->width, templates->size,
            images->devData, images->height, images->width, images->size,
            results->devData, results->height, results->width, results->size,
            invTemplateSize);
        getLastCudaError("cuCorrNormalize kernel error");
    }
    else {
        fprintf(stderr, "The (oversampled) window size along the across direction %d should be smaller than 1024.\n", imageNY);
        throw;
    }
}

// Compile-time log2 lookup for the fixed-size normalization entry points below.
template<int N> struct Log2;
template<> struct Log2<64> { static const int value = 6; };
template<> struct Log2<128> { static const int value = 7; };
template<> struct Log2<256> { static const int value = 8; };
template<> struct Log2<512> { static const int value = 9; };
template<> struct Log2<1024> { static const int value = 10; };

// Fixed-block-size variant of cuCorrNormalize (Size = secondary window width).
template<int Size>
void cuCorrNormalizeFixed(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream)
{
    const int nImages = correlation->count;
    const dim3 grid(1, 1, nImages);
    const float invReferenceSize = 1.0f/reference->size;
    cuCorrNormalize_kernel<Log2<Size>::value><<<grid, Size, 0, stream>>>(nImages,
        reference->devData, reference->height, reference->width, reference->size,
        secondary->devData, secondary->height, secondary->width, secondary->size,
        correlation->devData, correlation->height, correlation->width, correlation->size,
        invReferenceSize);
    getLastCudaError("cuCorrNormalize kernel error");
}

template void cuCorrNormalizeFixed<64>(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream);
template void cuCorrNormalizeFixed<128>(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream);
template void cuCorrNormalizeFixed<256>(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream);
template void cuCorrNormalizeFixed<512>(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream);
template void cuCorrNormalizeFixed<1024>(cuArrays<float> *correlation, cuArrays<float> *reference, cuArrays<float> *secondary, cudaStream_t stream);
// end of file
the_stack
#include "kernels/k_fixed_point.cuh"

#define AVOGADRO 6.0221367e23
#define BOLTZ 0.008314462618

namespace timemachine {

// Monte-Carlo barostat: every `interval` steps proposes an isotropic volume
// change, rescales molecule centroids, re-evaluates the potential energy and
// accepts/rejects the move with a Metropolis criterion (all on-device).
MonteCarloBarostat::MonteCarloBarostat(
    const int N,
    const double pressure,    // Expected in Bar
    const double temperature, // Kelvin
    const std::vector<std::vector<int>> group_idxs,
    const int interval,
    const std::vector<BoundPotential *> bps,
    const int seed)
    : N_(N), pressure_(pressure), temperature_(temperature), interval_(interval), bps_(bps),
      group_idxs_(group_idxs), num_grouped_atoms_(0), d_sum_storage_(nullptr), d_sum_storage_bytes_(0),
      seed_(seed), step_(0) {

    // Trigger check that interval is valid
    this->set_interval(interval_);

    // lets not have another facepalm moment again...
    if (temperature < 100.0) {
        std::cout << "warning temperature less than 100K" << std::endl;
    }
    if (pressure > 10.0) {
        std::cout << "warning pressure more than 10bar" << std::endl;
    }

    curandErrchk(curandCreateGenerator(&cr_rng_, CURAND_RNG_PSEUDO_DEFAULT));
    // d_rand_ holds two uniform draws: [0] volume-scale proposal, [1] Metropolis.
    gpuErrchk(cudaMalloc(&d_rand_, 2 * sizeof(double)));
    curandErrchk(curandSetPseudoRandomGeneratorSeed(cr_rng_, seed_));

    const int num_mols = group_idxs_.size();

    gpuErrchk(cudaMalloc(&d_x_after_, N_ * 3 * sizeof(*d_x_after_)));
    gpuErrchk(cudaMalloc(&d_box_after_, 3 * 3 * sizeof(*d_box_after_)));
    gpuErrchk(cudaMalloc(&d_u_buffer_, N_ * sizeof(*d_u_buffer_)));
    gpuErrchk(cudaMalloc(&d_u_after_buffer_, N_ * sizeof(*d_u_after_buffer_)));
    gpuErrchk(cudaMalloc(&d_init_u_, 1 * sizeof(*d_init_u_)));
    gpuErrchk(cudaMalloc(&d_final_u_, 1 * sizeof(*d_final_u_)));
    gpuErrchk(cudaMalloc(&d_num_accepted_, 1 * sizeof(*d_num_accepted_)));
    gpuErrchk(cudaMalloc(&d_num_attempted_, 1 * sizeof(*d_num_attempted_)));
    gpuErrchk(cudaMalloc(&d_volume_, 1 * sizeof(*d_volume_)));
    gpuErrchk(cudaMalloc(&d_length_scale_, 1 * sizeof(*d_length_scale_)));
    gpuErrchk(cudaMalloc(&d_volume_scale_, 1 * sizeof(*d_volume_scale_)));
    gpuErrchk(cudaMalloc(&d_volume_delta_, 1 * sizeof(*d_volume_delta_)));
    // Zero so k_setup_barostat_move seeds the adaptive scale on first use.
    gpuErrchk(cudaMemset(d_volume_scale_, 0, 1 * sizeof(*d_volume_scale_)));

    std::set<int> group_set;
    for (int i = 0; i < num_mols; i++) {
        std::vector<int> atoms = group_idxs[i];
        const int num_atoms = atoms.size();
        num_grouped_atoms_ += num_atoms;
        for (int j = 0; j < num_atoms; j++) {
            int idx = atoms[j];
            if (idx < 0 || idx >= N_) {
                throw std::runtime_error("Grouped indices must be between 0 and N");
            }
            group_set.insert(idx);
        }
    }
    // Verify that all of the group indices are unique
    if (group_set.size() != num_grouped_atoms_) {
        throw std::runtime_error("All grouped indices must be unique");
    }

    gpuErrchk(cudaMalloc(&d_centroids_, num_mols * 3 * sizeof(*d_centroids_)));
    gpuErrchk(cudaMalloc(&d_atom_idxs_, num_grouped_atoms_ * sizeof(*d_atom_idxs_)));
    gpuErrchk(cudaMalloc(&d_mol_idxs_, num_grouped_atoms_ * sizeof(*d_mol_idxs_)));
    gpuErrchk(cudaMalloc(&d_mol_offsets_, (num_mols + 1) * sizeof(*d_mol_offsets_)));

    // Flatten group_idxs into CSR-style host arrays before uploading.
    // NOTE(review): these are variable-length arrays — a compiler extension in
    // C++; std::vector would be standard-conforming.
    int offset = 0;
    int mol_offsets[num_mols + 1];
    int mol_idxs[num_grouped_atoms_];
    int atom_idxs[num_grouped_atoms_];
    for (int i = 0; i < num_mols; i++) {
        std::vector<int> atoms = group_idxs[i];
        mol_offsets[i] = offset;
        int num_atoms = atoms.size();
        for (int j = 0; j < num_atoms; j++) {
            mol_idxs[offset + j] = i;
            atom_idxs[offset + j] = atoms[j];
        }
        offset += num_atoms;
    }
    mol_offsets[num_mols] = offset;
    gpuErrchk(cudaMemcpy(d_mol_idxs_, mol_idxs, num_grouped_atoms_ * sizeof(*d_mol_idxs_), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_atom_idxs_, atom_idxs, num_grouped_atoms_ * sizeof(*d_atom_idxs_), cudaMemcpyHostToDevice));
    gpuErrchk(
        cudaMemcpy(d_mol_offsets_, mol_offsets, (num_mols + 1) * sizeof(*d_mol_offsets_), cudaMemcpyHostToDevice));

    // Use a typed nullptr so cub can calculate space needed to reduce
    unsigned long long *d_in_tmp = nullptr;  // dummy
    unsigned long long *d_out_tmp = nullptr; // dummy

    // Compute amount of space to reduce energies (first CUB call only sizes).
    cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_in_tmp, d_out_tmp, N_);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMalloc(&d_sum_storage_, d_sum_storage_bytes_));

    this->reset_counters();
};

// Free all device buffers and the RNG. (d_volume_ appears to be allocated but
// never freed here — possible leak, TODO confirm.)
MonteCarloBarostat::~MonteCarloBarostat() {
    gpuErrchk(cudaFree(d_x_after_));
    gpuErrchk(cudaFree(d_centroids_));
    gpuErrchk(cudaFree(d_atom_idxs_));
    gpuErrchk(cudaFree(d_mol_idxs_));
    gpuErrchk(cudaFree(d_mol_offsets_));
    gpuErrchk(cudaFree(d_box_after_));
    gpuErrchk(cudaFree(d_u_after_buffer_));
    gpuErrchk(cudaFree(d_u_buffer_));
    gpuErrchk(cudaFree(d_init_u_));
    gpuErrchk(cudaFree(d_final_u_));
    // NOTE(review): no semicolon after the next call — compiles only because of
    // the macro's expansion; worth normalizing.
    gpuErrchk(cudaFree(d_rand_))
    gpuErrchk(cudaFree(d_length_scale_));
    gpuErrchk(cudaFree(d_volume_scale_));
    gpuErrchk(cudaFree(d_volume_delta_));
    gpuErrchk(cudaFree(d_num_accepted_));
    gpuErrchk(cudaFree(d_num_attempted_));
    curandErrchk(curandDestroyGenerator(cr_rng_));
};

// Move every grouped atom so its molecule's centroid scales about the box
// center by length_scale; thread 0 of atom 0 also rescales the box diagonal.
void __global__ rescale_positions(
    const int N,                                      // Number of atoms to shift
    double *__restrict__ coords,                      // Coordinates
    const double *__restrict__ length_scale,          // [1]
    const double *__restrict__ box,                   // [9]
    double *__restrict__ scaled_box,                  // [9]
    const int *__restrict__ atom_idxs,                // [N]
    const int *__restrict__ mol_idxs,                 // [N]
    const int *__restrict__ mol_offsets,              // [N]
    const unsigned long long *__restrict__ centroids  // [N*3]
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }
    const int atom_idx = atom_idxs[idx];
    const int mol_idx = mol_idxs[idx];

    // Box center from the diagonal of the (orthorhombic) box.
    const double center_x = box[0 * 3 + 0] * 0.5;
    const double center_y = box[1 * 3 + 1] * 0.5;
    const double center_z = box[2 * 3 + 2] * 0.5;

    const double num_atoms = static_cast<double>(mol_offsets[mol_idx + 1] - mol_offsets[mol_idx]);

    // Centroids were accumulated in fixed point; convert and average.
    const double centroid_x = FIXED_TO_FLOAT<double>(centroids[mol_idx * 3 + 0]) / num_atoms;
    const double centroid_y = FIXED_TO_FLOAT<double>(centroids[mol_idx * 3 + 1]) / num_atoms;
    const double centroid_z = FIXED_TO_FLOAT<double>(centroids[mol_idx * 3 + 2]) / num_atoms;

    // Rigid translation that moves the centroid to its scaled position.
    const double displacement_x = ((centroid_x - center_x) * length_scale[0]) + center_x - centroid_x;
    const double displacement_y = ((centroid_y - center_y) * length_scale[0]) + center_y - centroid_y;
    const double displacement_z = ((centroid_z - center_z) * length_scale[0]) + center_z - centroid_z;

    coords[atom_idx * 3 + 0] += displacement_x;
    coords[atom_idx * 3 + 1] += displacement_y;
    coords[atom_idx * 3 + 2] += displacement_z;

    if (atom_idx == 0) {
        scaled_box[0 * 3 + 0] *= length_scale[0];
        scaled_box[1 * 3 + 1] *= length_scale[0];
        scaled_box[2 * 3 + 2] *= length_scale[0];
    }
}

// Accumulate each molecule's coordinate sum (fixed point, via atomics);
// callers divide by the atom count to obtain the centroid.
void __global__ find_group_centroids(
    const int N,                              // Number of atoms to shift
    const double *__restrict__ coords,        // Coordinates
    const int *__restrict__ atom_idxs,        // [N]
    const int *__restrict__ mol_idxs,         // [N]
    unsigned long long *__restrict__ centroids // [num_molecules * 3]
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }
    const int atom_idx = atom_idxs[idx];
    const int mol_idx = mol_idxs[idx];
    atomicAdd(centroids + mol_idx * 3 + 0, FLOAT_TO_FIXED<double>(coords[atom_idx * 3 + 0]));
    atomicAdd(centroids + mol_idx * 3 + 1, FLOAT_TO_FIXED<double>(coords[atom_idx * 3 + 1]));
    atomicAdd(centroids + mol_idx * 3 + 2, FLOAT_TO_FIXED<double>(coords[atom_idx * 3 + 2]));
}

// Single-thread kernel: draw the proposed volume change and the corresponding
// isotropic length scale; seeds the adaptive volume scale on first call.
void __global__ k_setup_barostat_move(
    const double *__restrict__ rand,     // [2], use first value, second value is metropolis condition
    double *__restrict__ d_box,          // [3*3]
    double *__restrict__ d_volume_delta, // [1]
    double *__restrict__ d_volume_scale, // [1]
    double *__restrict__ d_length_scale  // [1]
) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= 1) {
        return; // Only a single thread needs to perform this operation
    }
    const double volume = d_box[0 * 3 + 0] * d_box[1 * 3 + 1] * d_box[2 * 3 + 2];
    if (d_volume_scale[0] == 0) {
        d_volume_scale[0] = 0.01 * volume;
    }
    // Uniform proposal in [-scale, +scale].
    const double delta_volume = d_volume_scale[0] * 2 * (rand[0] - 0.5);
    const double new_volume = volume + delta_volume;
    d_volume_delta[0] = delta_volume;
    d_length_scale[0] = cbrt(new_volume / volume);
}

// Metropolis accept/reject of the proposed move.  Thread 0 updates the
// acceptance counters and adapts the volume scale; on acceptance all threads
// copy the scaled coordinates (and threads 0-8 the box) into place.
void __global__ k_decide_move(
    const int N,
    const int num_molecules,
    const double kt,
    const double pressure,
    const double *__restrict__ rand, // [2] Use second value
    double *__restrict__ d_volume_delta,
    double *__restrict__ d_volume_scale,
    const unsigned long long *__restrict__ d_init_u,
    const unsigned long long *__restrict__ d_final_u,
    double *__restrict__ d_box,
    const double *__restrict__ d_box_output,
    double *__restrict__ d_x,
    const double *__restrict__ d_x_output,
    int *__restrict__ num_accepted,
    int *__restrict__ num_attempted) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }

    const double volume = d_box[0 * 3 + 0] * d_box[1 * 3 + 1] * d_box[2 * 3 + 2];
    const double new_volume = volume + d_volume_delta[0];
    // Energies were accumulated in fixed point; difference then convert.
    const double energy_delta = FIXED_TO_FLOAT<double>(d_final_u[0] - d_init_u[0]);

    // Metropolis weight: dU + P*dV - N*kT*ln(V'/V).
    const double w = energy_delta + pressure * d_volume_delta[0] - num_molecules * kt * std::log(new_volume / volume);

    // Every thread evaluates the same deterministic condition.
    const bool rejected = w > 0 && rand[1] > std::exp(-w / kt);

    if (idx == 0) {
        if (!rejected) {
            num_accepted[0]++;
        }
        num_attempted[0]++;
        // Adapt the proposal size toward ~25-75% acceptance every 10 attempts.
        if (num_attempted[0] >= 10) {
            if (num_accepted[0] < 0.25 * num_attempted[0]) {
                d_volume_scale[0] /= 1.1;
                // Reset the counters
                num_attempted[0] = 0;
                num_accepted[0] = 0;
            } else if (num_accepted[0] > 0.75 * num_attempted[0]) {
                d_volume_scale[0] = min(d_volume_scale[0] * 1.1, volume * 0.3);
                // Reset the counters
                num_attempted[0] = 0;
                num_accepted[0] = 0;
            }
        }
    }
    if (rejected) {
        return;
    }

    // If the mc move was accepted copy all of the data into place
    if (idx < 9) {
        d_box[idx] = d_box_output[idx];
    }

#pragma unroll
    for (int i = 0; i < 3; i++) {
        d_x[idx * 3 + i] = d_x_output[idx * 3 + i];
    }
}

// Zero the device-side acceptance statistics.
void MonteCarloBarostat::reset_counters() {
    gpuErrchk(cudaMemset(d_num_accepted_, 0, sizeof(*d_num_accepted_)));
    gpuErrchk(cudaMemset(d_num_attempted_, 0, sizeof(*d_num_attempted_)));
}

// Attempt one barostat move (no-op unless step_ hits the interval).
// NOTE: this function continues past the end of this chunk; the trailing
// cub::DeviceReduce::Sum call is completed on the following (unseen) lines.
void MonteCarloBarostat::inplace_move(
    double *d_x,   // [N*3]
    double *d_box, // [3*3]
    const double lambda,
    cudaStream_t stream) {
    step_++;
    if (step_ % interval_ != 0) {
        return;
    }

    curandErrchk(curandSetStream(cr_rng_, stream));
    // Generate scaling and metropolis conditions in one pass
    curandErrchk(curandGenerateUniformDouble(cr_rng_, d_rand_, 2));

    gpuErrchk(cudaMemsetAsync(d_init_u_, 0, sizeof(*d_init_u_), stream));
    gpuErrchk(cudaMemsetAsync(d_final_u_, 0, sizeof(*d_final_u_), stream));
    gpuErrchk(cudaMemsetAsync(d_u_buffer_, 0, N_ * sizeof(*d_u_buffer_), stream));
    gpuErrchk(cudaMemsetAsync(d_u_after_buffer_, 0, N_ * sizeof(*d_u_after_buffer_), stream));

    // Energy of the current configuration.
    for (int i = 0; i < bps_.size(); i++) {
        bps_[i]->execute_device(N_, d_x, d_box, lambda, nullptr, nullptr, nullptr, d_u_buffer_, stream);
    }
    cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_u_buffer_, d_init_u_, N_, stream);
    gpuErrchk(cudaPeekAtLastError());

    k_setup_barostat_move<<<1, 1, 0, stream>>>(d_rand_, d_box, d_volume_delta_, d_volume_scale_, d_length_scale_);
    gpuErrchk(cudaPeekAtLastError());

    const int num_molecules = group_idxs_.size();
    gpuErrchk(cudaMemsetAsync(d_centroids_, 0, num_molecules * 3 * sizeof(*d_centroids_), stream));

    // Create duplicates of the coords/box that we can modify
    gpuErrchk(cudaMemcpyAsync(d_x_after_, d_x, N_ * 3 * sizeof(*d_x), cudaMemcpyDeviceToDevice, stream));
    gpuErrchk(cudaMemcpyAsync(d_box_after_, d_box, 3 * 3 * sizeof(*d_box_after_), cudaMemcpyDeviceToDevice, stream));

    const int tpb = 32;
    const int blocks = (num_grouped_atoms_ + tpb - 1) / tpb;

    find_group_centroids<<<blocks, tpb, 0, stream>>>(num_grouped_atoms_, d_x, d_atom_idxs_, d_mol_idxs_, d_centroids_);
    gpuErrchk(cudaPeekAtLastError());

    // Scale centroids
    rescale_positions<<<blocks, tpb, 0, stream>>>(
        num_grouped_atoms_,
        d_x_after_,
        d_length_scale_,
        d_box,
        d_box_after_, // Box will be rescaled by length_scale
        d_atom_idxs_,
        d_mol_idxs_,
        d_mol_offsets_,
        d_centroids_);
    gpuErrchk(cudaPeekAtLastError());

    // Energy of the proposed configuration.
    for (int i = 0; i < bps_.size(); i++) {
        bps_[i]->execute_device(
            N_, d_x_after_, d_box_after_, lambda, nullptr, nullptr, nullptr, d_u_after_buffer_, stream);
    }
    cub::DeviceReduce::Sum(d_sum_storage_, d_sum_storage_bytes_, d_u_after_buffer_, d_final_u_, N_,
stream); gpuErrchk(cudaPeekAtLastError()); double pressure = pressure_ * AVOGADRO * 1e-25; const double kT = BOLTZ * temperature_; const int move_blocks = (N_ + tpb - 1) / tpb; k_decide_move<<<move_blocks, tpb, 0, stream>>>( N_, num_molecules, kT, pressure, d_rand_, d_volume_delta_, d_volume_scale_, d_init_u_, d_final_u_, d_box, d_box_after_, d_x, d_x_after_, d_num_accepted_, d_num_attempted_); gpuErrchk(cudaPeekAtLastError()) }; void MonteCarloBarostat::set_interval(const int interval) { if (interval <= 0) { throw std::runtime_error("Barostat interval must be greater than 0"); } interval_ = interval; // Clear the step, to ensure user can expect that in N steps the barostat will trigger step_ = 0; } int MonteCarloBarostat::get_interval() { return interval_; } void MonteCarloBarostat::set_pressure(const double pressure) { pressure_ = pressure; // Could have equilibrated and be a large number of steps from shifting volume // adjustment, ie num attempted = 300 and num accepted = 150 this->reset_counters(); } } // namespace timemachine
the_stack
#pragma once

#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/app/sage/sage_problem.cuh>
#include <gunrock/oprtr/oprtr.cuh>

namespace gunrock {
namespace app {
namespace sage {

/**
 * @brief Specifying parameters for the SAGE enactor
 * @param parameters The util::Parameter<...> structure holding all parameter
 * info \return cudaError_t error message(s), if any
 */
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(app::UseParameters_enactor(parameters));
  return retval;
}

// Cache load/store modifiers used by the custom SAGE kernels below.
static const cub::CacheLoadModifier W_LOAD = cub::LOAD_LDG;    // for Wa and Wf
static const cub::CacheLoadModifier F_LOAD = cub::LOAD_LDG;    // for features
static const cub::CacheLoadModifier S_LOAD = cub::LOAD_CA;     // for Sums
static const cub::CacheStoreModifier S_STORE = cub::STORE_WB;  // for Sums
static const cub::CacheLoadModifier T_LOAD = cub::LOAD_CA;     // for temps
static const cub::CacheStoreModifier T_STORE = cub::STORE_WB;  // for temps

// SAGE sampling/aggregation kernel: one block per sampled child slot. Thread
// 0 samples the child vertex from its source's neighbor list into shared
// memory; the block then samples num_leafs_per_child leaf neighbors into
// dynamic shared memory (s_leafs) before aggregating features (continues past
// this chunk boundary).
// Launch precondition: dynamic shared memory >= num_leafs_per_child *
// sizeof(VertexT).
template <typename GraphT, typename ValueT>
__global__ void sage_kernel1(typename GraphT::VertexT source_start,
                             int num_children_per_source, const GraphT graph,
                             uint64_t feature_column, ValueT *features,
                             int num_leafs_per_child, ValueT *sums,
                             curandState *rand_states, ValueT *sums_child_feat,
                             typename GraphT::VertexT *children,
                             typename GraphT::SizeT num_children) {
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;

  SizeT child_num = blockIdx.x;
  extern __shared__ VertexT s_leafs[];
  __shared__ VertexT s_child;
  __shared__ SizeT s_child_degree, s_child_edge_offset;
  SizeT thread_id = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;

  while (child_num < num_children) {
    if (threadIdx.x == 0) {
      // Each source owns num_children_per_source consecutive child slots.
      VertexT source = child_num / num_children_per_source + source_start;
      // Sample a random neighbor of `source` as this block's child.
      // NOTE(review): curand_uniform returns a value in (0, 1]; a draw of
      // exactly 1.0 indexes one past the neighbor list — confirm this is
      // handled (or tolerated) upstream.
      s_child = graph.GetEdgeDest(graph.GetNeighborListOffset(source) +
                                  curand_uniform(rand_states + thread_id) *
graph.GetNeighborListLength(source)); children[child_num] = s_child; s_child_degree = graph.GetNeighborListLength(s_child); s_child_edge_offset = graph.GetNeighborListOffset(s_child); } __syncthreads(); for (int i = threadIdx.x; i < num_leafs_per_child; i += blockDim.x) { s_leafs[i] = graph.GetEdgeDest(s_child_edge_offset + curand_uniform(rand_states + thread_id) * s_child_degree); } __syncthreads(); for (auto i = threadIdx.x; i < feature_column; i += blockDim.x) { ValueT sum = 0; for (int j = 0; j < num_leafs_per_child; j++) sum += Load<F_LOAD>(features + (s_leafs[j] * feature_column + i)); sum /= num_leafs_per_child; Store<S_STORE>(sums + (child_num * feature_column + i), sum); atomicAdd(sums_child_feat + (child_num / num_children_per_source * feature_column + i), Load<F_LOAD>(features + (s_child * feature_column + i)) / num_children_per_source); } __syncthreads(); child_num += gridDim.x; } } template <int LOG_THREADS_, typename VertexT, typename SizeT, typename ValueT> __global__ void sage_kernel2(int num_children_per_source, uint64_t feature_column, ValueT *features, ValueT *W_f_1, int Wf1_dim1, VertexT *children, ValueT *W_a_1, int Wa1_dim1, int Wa2_dim0, int Wf2_dim0, ValueT *children_temp, ValueT *sums_child_feat, ValueT *sums, SizeT num_children) { typedef util::reduce::BlockReduce<ValueT, LOG_THREADS_> BlockReduceT; __shared__ VertexT s_child; __shared__ typename BlockReduceT::TempSpace reduce_space; SizeT child_num = blockIdx.x; while (child_num < num_children) { if (threadIdx.x == 0) { s_child = children[child_num]; } __syncthreads(); ValueT val = 0; if (threadIdx.x < Wf1_dim1) { auto f_offset = s_child * feature_column; for (int f = 0; f < feature_column; f++) val += Load<F_LOAD>(features + f_offset + f) * Load<W_LOAD>(W_f_1 + (f * Wf1_dim1 + threadIdx.x)); } else if (threadIdx.x < Wf1_dim1 + Wa1_dim1) { auto f_offset = child_num * feature_column; for (int f = 0; f < feature_column; f++) val += Load<cub::LOAD_LDG>(sums + f_offset + f) * 
Load<W_LOAD>(W_a_1 + (f * Wa1_dim1 + threadIdx.x - Wf1_dim1)); } if (val < 0) val = 0; // relu() double L2_child_temp = BlockReduceT::Reduce( val * val, [](const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, reduce_space); if (threadIdx.x < Wa2_dim0) { L2_child_temp = 1.0 / sqrt(L2_child_temp); val *= L2_child_temp; atomicAdd(children_temp + (child_num / num_children_per_source) * Wa2_dim0 + threadIdx.x, val / num_children_per_source); } __syncthreads(); child_num += gridDim.x; } } template <int LOG_THREADS_, typename SizeT, typename VertexT, typename ValueT> __global__ void sage_kernel3(uint64_t feature_column, ValueT *features, VertexT source_start, ValueT *W_f_1, int Wf1_dim1, ValueT *children_temp, ValueT *sums_child_feat, ValueT *W_a_1, int Wa1_dim1, ValueT *W_f_2, int Wf2_dim1, int Wf2_dim0, ValueT *W_a_2, int Wa2_dim1, int Wa2_dim0, ValueT *source_result, int result_column, ValueT *source_temp, VertexT num_sources, bool use_shared_source_temp) { typedef util::reduce::BlockReduce<ValueT, LOG_THREADS_> BlockReduceT; __shared__ typename BlockReduceT::TempSpace reduce_space; __shared__ double s_L2; extern __shared__ ValueT s_source_temp[]; VertexT source_num = blockIdx.x; while (source_num < num_sources) { ValueT val = 0; if (threadIdx.x < Wf1_dim1) { auto f_offset = (source_start + source_num) * feature_column; for (int f = 0; f < feature_column; f++) val += Load<F_LOAD>(features + f_offset + f) * Load<W_LOAD>(W_f_1 + (f * Wf1_dim1 + threadIdx.x)); } else if (threadIdx.x < Wf2_dim0) { auto f_offset = source_num * feature_column; for (int f = 0; f < feature_column; f++) val += Load<S_LOAD>(sums_child_feat + f_offset + f) * Load<W_LOAD>(W_a_1 + (f * Wa1_dim1 + threadIdx.x - Wf1_dim1)); } if (val < 0) val = 0; // relu() double L2 = BlockReduceT::Reduce( val * val, [](const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, reduce_space); if (threadIdx.x == 0) s_L2 = 1.0 / sqrt(L2); __syncthreads(); if (threadIdx.x < Wf2_dim0) { // L2 = 1.0 / 
sqrt(L2); if (use_shared_source_temp) s_source_temp[threadIdx.x] = val * s_L2; else Store<T_STORE>(source_temp + (source_num * Wf2_dim0 + threadIdx.x), (ValueT)(val * s_L2)); } __syncthreads(); val = 0; if (threadIdx.x < Wf2_dim1) { SizeT offset = source_num * Wf2_dim0; for (int y = 0; y < Wf2_dim0; y++) val += (use_shared_source_temp ? s_source_temp[y] : Load<T_LOAD>(source_temp + offset + y)) * Load<W_LOAD>(W_f_2 + (y * Wf2_dim1 + threadIdx.x)); } else if (threadIdx.x < result_column) { SizeT offset = source_num * Wa2_dim0; for (int y = 0; y < Wa2_dim0; y++) val += Load<cub::LOAD_LDG>(children_temp + offset + y) * Load<W_LOAD>(W_a_2 + (y * Wa2_dim1 + threadIdx.x - Wf2_dim1)); } if (val < 0) val = 0; L2 = BlockReduceT::Reduce( val * val, [](const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, reduce_space); if (threadIdx.x == 0) s_L2 = 1.0 / sqrt(L2); __syncthreads(); if (threadIdx.x < result_column) { // L2 = 1.0 / sqrt(L2); Store<cub::STORE_WT>( source_result + (source_num * result_column + threadIdx.x), (ValueT)(val * s_L2)); } __syncthreads(); source_num += gridDim.x; } } /** * @brief defination of SAGE iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct SAGEIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push // | // (((EnactorT::Problem::FLAG & // Mark_Predecessors) != 0) ? // Update_Predecessors : 0x0) > { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem::GraphT::CooT CooT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push // | // (((EnactorT::Problem::FLAG & Mark_Predecessors) // != 0) ? 
// Update_Predecessors : 0x0) > BaseIterationLoop; SAGEIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of sage, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // Data sage that works on auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &W_f_1 = data_slice.W_f_1_1D; auto Wf1_dim1 = data_slice.Wf1_dim1; auto &W_a_1 = data_slice.W_a_1_1D; auto Wa1_dim1 = data_slice.Wa1_dim1; auto &W_f_2 = data_slice.W_f_2_1D; auto Wf2_dim0 = data_slice.Wf2_dim0; auto Wf2_dim1 = data_slice.Wf2_dim1; auto &W_a_2 = data_slice.W_a_2_1D; auto Wa2_dim0 = data_slice.Wa2_dim0; auto Wa2_dim1 = data_slice.Wa2_dim1; auto &features = data_slice.features_1D; uint64_t feature_column = data_slice.feature_column; auto &source_result = data_slice.source_result; auto result_column = data_slice.result_column; auto num_children_per_source = data_slice.num_children_per_source; auto num_leafs_per_child = data_slice.num_leafs_per_child; auto &sums = data_slice.sums; auto &sums_child_feat = data_slice.sums_child_feat; // auto &child_temp = data_slice.child_temp; auto &children_temp = data_slice.children_temp; auto &children = data_slice.children; auto &rand_states = data_slice.rand_states; auto &retval = enactor_stats.retval; auto &stream = enactor_slice.stream; auto &iteration = enactor_stats.iteration; VertexT source_start = iteration * data_slice.batch_size; VertexT source_end = (iteration + 1) * data_slice.batch_size; if (source_end >= graph.nodes) source_end = graph.nodes; VertexT num_sources = source_end - source_start; SizeT num_children = num_sources * data_slice.num_children_per_source; util::PrintMsg("Processing sources [" + 
std::to_string(source_start) + ", " + std::to_string(source_start + num_sources) + ")", data_slice.debug); GUARD_CU( children_temp.ForEach([] __host__ __device__(ValueT & val) { val = 0; }, num_sources * Wf2_dim0, util::DEVICE, stream)); GUARD_CU(sums_child_feat.ForEach( [] __host__ __device__(ValueT & val) { val = 0; }, num_sources * feature_column, util::DEVICE, stream)); if (data_slice.custom_kernels) { sage_kernel1<<<2560, min((int)feature_column, 512), num_leafs_per_child * sizeof(VertexT), stream>>>( source_start, num_children_per_source, graph, feature_column, features.GetPointer(util::DEVICE), num_leafs_per_child, sums.GetPointer(util::DEVICE), rand_states.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), children.GetPointer(util::DEVICE), num_children); } else { int grid_size = 80; int block_size = 256; GUARD_CU(children.ForAll( [source_start, num_children_per_source, graph, feature_column, features, num_leafs_per_child, sums, rand_states, sums_child_feat, grid_size, block_size] __host__ __device__(VertexT * childs, const SizeT &i) { VertexT source = i / num_children_per_source + source_start; // SizeT offset = curand_uniform(rand_states + i) // * graph.GetNeighborListLength(source); // SizeT edge = graph.GetNeighborListOffset(source) + offset; // VertexT child = graph.GetEdgeDest(edge); VertexT child = graph.GetEdgeDest( graph.GetNeighborListOffset(source) + curand_uniform(rand_states + (i % (grid_size * block_size))) * graph.GetNeighborListLength(source)); childs[i] = child; SizeT child_degree = graph.GetNeighborListLength(child); SizeT child_edge_offset = graph.GetNeighborListOffset(child); // float sums [64] = {0.0} ; //local vector auto f_offset = i * feature_column; for (auto f = 0; f < feature_column; f++) Store<S_STORE>(sums + (f_offset + f), (ValueT)0); for (int j = 0; j < num_leafs_per_child; j++) { // SizeT offset2 = 0;//cuRand() * child_degree; // SizeT edge2 = graph.GetNeighborListOffset(child) // + curand_uniform(rand_states 
+ i) * child_degree; // VertexT leaf = graph.GetEdgeDest(edge2); VertexT leaf = graph.GetEdgeDest( child_edge_offset + curand_uniform(rand_states + (i % (grid_size * block_size))) * child_degree); auto offset = leaf * feature_column; for (auto f = 0; f < feature_column; f++) { Store<S_STORE>(sums + (f_offset + f), Load<S_LOAD>(sums + (f_offset + f)) + Load<F_LOAD>(features + (offset + f))); /// num_neigh2;// merged line 176 171 } } for (auto f = 0; f < feature_column; f++) Store<S_STORE>( sums + (f_offset + f), Load<S_LOAD>(sums + (f_offset + f)) / num_leafs_per_child); // agg feaures for leaf nodes alg2 line 11 k = 1; auto offset = i / num_children_per_source * feature_column; f_offset = child * feature_column; // SizeT f_offset = children[i] * feature_column; for (auto f = 0; f < feature_column; f++) { atomicAdd(sums_child_feat + offset + f, Load<F_LOAD>(features + (f_offset + f)) / num_children_per_source); // merge 220 and 226 } }, num_children, util::DEVICE, stream, 80, 256)); } // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); if (data_slice.custom_kernels && Wa2_dim0 <= 1024) { if (Wa2_dim0 <= 128) sage_kernel2<7><<<1280, 128, 0, stream>>>( num_children_per_source, feature_column, features.GetPointer(util::DEVICE), W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, Wa2_dim0, Wf2_dim0, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), sums.GetPointer(util::DEVICE), num_children); else if (Wa2_dim0 <= 256) sage_kernel2<8><<<1280, 256, 0, stream>>>( num_children_per_source, feature_column, features.GetPointer(util::DEVICE), W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, Wa2_dim0, Wf2_dim0, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), sums.GetPointer(util::DEVICE), num_children); else if (Wa2_dim0 <= 512) sage_kernel2<9><<<1280, 
512, 0, stream>>>( num_children_per_source, feature_column, features.GetPointer(util::DEVICE), W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, Wa2_dim0, Wf2_dim0, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), sums.GetPointer(util::DEVICE), num_children); else if (Wa2_dim0 <= 1024) sage_kernel2<10><<<1280, 1024, 0, stream>>>( num_children_per_source, feature_column, features.GetPointer(util::DEVICE), W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, Wa2_dim0, Wf2_dim0, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), sums.GetPointer(util::DEVICE), num_children); } else { GUARD_CU(data_slice.child_temp.ForAll( [num_children_per_source, feature_column, features, W_f_1, Wf1_dim1, children, W_a_1, Wa1_dim1, Wa2_dim0, Wf2_dim0, children_temp, sums_child_feat, sums] __host__ __device__(ValueT * child_temp_, const SizeT &i) { ValueT *child_temp = child_temp_ + i * Wf2_dim0; auto f_offset = children[i] * feature_column; double L2_child_temp = 0.0; for (int x = 0; x < Wf1_dim1; x++) { ValueT val = 0; for (auto f = 0; f < feature_column; f++) val += Load<F_LOAD>(features + (f_offset + f)) * Load<W_LOAD>(W_f_1 + (f * Wf1_dim1 + x)); if (val < 0) // relu() val = 0; L2_child_temp += val * val; Store<T_STORE>(child_temp + x, val); } // got 1st half of h_B1^1 auto offset = i * feature_column; for (int x = 0; x < Wa1_dim1; x++) { ValueT val = 0; for (auto f = 0; f < feature_column; f++) val += Load<cub::LOAD_LDG>(sums + (offset + f)) * Load<W_LOAD>(W_a_1 + (f * Wa1_dim1 + x)); if (val < 0) // relu() val = 0; L2_child_temp += val * val; Store<T_STORE>(child_temp + (x + Wf1_dim1), val); } // got 2nd half of h_B1^1 // activation and L-2 normalize // double L2_child_temp = 0.0; // for (int x =0; x < Wa2_dim0; x++) //{ // ValueT val = child_temp[x]; // if (val < 0) // relu() // val 
= 0; // L2_child_temp += val * val; // child_temp[x] = val; //} //finished relu L2_child_temp = 1.0 / sqrt(L2_child_temp); offset = i / num_children_per_source * Wa2_dim0; for (int x = 0; x < Wa2_dim0; x++) { // child_temp[idx_0] = child_temp[idx_0] /sqrt (L2_child_temp); // child_temp[x] *= L2_child_temp; ValueT val = Load<T_LOAD>(child_temp + x); val *= L2_child_temp; //}//finished L-2 norm, got h_B1^1, algo2 line13 // add the h_B1^1 to children_temp, also agg it // for (int x =0; x < Wa2_dim0; x ++ ) //205 //{ atomicAdd(children_temp + (offset + x), val / num_children_per_source); } // finished agg (h_B1^1) // end of for each child }, num_children, util::DEVICE, stream, 80)); } // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); if (iteration != 0) { GUARD_CU2(cudaStreamWaitEvent(stream, data_slice.d2h_finish, 0), "cudaStreamWaitEvent failed"); } int max_dim = max(Wf1_dim1 + Wa1_dim1, Wf2_dim1 + Wa2_dim1); if (data_slice.custom_kernels && max_dim <= 1024) { size_t shared_size = Wf2_dim0 * sizeof(ValueT); bool use_shared_source_temp = (shared_size <= 24 * 1024); if (!use_shared_source_temp) shared_size = 0; if (max_dim <= 128) sage_kernel3<7, SizeT><<<1280, 128, shared_size, stream>>>( feature_column, features.GetPointer(util::DEVICE), source_start, W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, W_f_2.GetPointer(util::DEVICE), Wf2_dim1, Wf2_dim0, W_a_2.GetPointer(util::DEVICE), Wa2_dim1, Wa2_dim0, source_result.GetPointer(util::DEVICE), result_column, data_slice.source_temp.GetPointer(util::DEVICE), num_sources, use_shared_source_temp); else if (max_dim <= 256) sage_kernel3<8, SizeT><<<1280, 256, shared_size, stream>>>( feature_column, features.GetPointer(util::DEVICE), source_start, W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), 
W_a_1.GetPointer(util::DEVICE), Wa1_dim1, W_f_2.GetPointer(util::DEVICE), Wf2_dim1, Wf2_dim0, W_a_2.GetPointer(util::DEVICE), Wa2_dim1, Wa2_dim0, source_result.GetPointer(util::DEVICE), result_column, data_slice.source_temp.GetPointer(util::DEVICE), num_sources, use_shared_source_temp); else if (max_dim <= 512) sage_kernel3<9, SizeT><<<1280, 512, shared_size, stream>>>( feature_column, features.GetPointer(util::DEVICE), source_start, W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, W_f_2.GetPointer(util::DEVICE), Wf2_dim1, Wf2_dim0, W_a_2.GetPointer(util::DEVICE), Wa2_dim1, Wa2_dim0, source_result.GetPointer(util::DEVICE), result_column, data_slice.source_temp.GetPointer(util::DEVICE), num_sources, use_shared_source_temp); else if (max_dim <= 1024) sage_kernel3<10, SizeT><<<1280, 1024, shared_size, stream>>>( feature_column, features.GetPointer(util::DEVICE), source_start, W_f_1.GetPointer(util::DEVICE), Wf1_dim1, children_temp.GetPointer(util::DEVICE), sums_child_feat.GetPointer(util::DEVICE), W_a_1.GetPointer(util::DEVICE), Wa1_dim1, W_f_2.GetPointer(util::DEVICE), Wf2_dim1, Wf2_dim0, W_a_2.GetPointer(util::DEVICE), Wa2_dim1, Wa2_dim0, source_result.GetPointer(util::DEVICE), result_column, data_slice.source_temp.GetPointer(util::DEVICE), num_sources, use_shared_source_temp); } else { GUARD_CU(data_slice.source_temp.ForAll( [feature_column, features, source_start, W_f_1, Wf1_dim1, children_temp, sums_child_feat, W_a_1, Wa1_dim1, W_f_2, Wf2_dim1, Wf2_dim0, W_a_2, Wa2_dim1, Wa2_dim0, source_result, result_column] __host__ __device__(ValueT * source_temp_, const SizeT &i) { ValueT *source_temp = source_temp_ + i * Wf2_dim0; VertexT source = source_start + i; auto offset = source * feature_column; // get ebedding vector for child node (h_{B2}^{1}) alg2 line 12 double L2_source_temp = 0.0; for (int x = 0; x < Wf1_dim1; x++) { ValueT val = 0; for (auto f 
= 0; f < feature_column; f++) val += Load<F_LOAD>(features + (offset + f)) * Load<W_LOAD>(W_f_1 + (f * Wf1_dim1 + x)); if (val < 0) val = 0; // relu() L2_source_temp += val * val; Store<T_STORE>(source_temp + x, val); } // got 1st half of h_B2^1 offset = i * feature_column; for (int x = 0; x < Wa1_dim1; x++) { ValueT val = 0; for (auto f = 0; f < feature_column; f++) val += sums_child_feat[offset + f] * Load<W_LOAD>(W_a_1 + (f * Wa1_dim1 + x)); if (val < 0) val = 0; // relu() L2_source_temp += val * val; Store<T_STORE>(source_temp + (Wf1_dim1 + x), val); } // got 2nd half of h_B2^1 // for (int x =0; x < Wf2_dim0; x++) //{ // ValueT val = source_temp[x]; // if (val < 0) // val = 0; // relu() // L2_source_temp += val * val; // source_temp[x] = val; //} //finished relu L2_source_temp = 1.0 / sqrt(L2_source_temp); for (int x = 0; x < Wf2_dim0; x++) { // source_temp[idx_0] = source_temp[idx_0] /sqrt (L2_source_temp); // source_temp[x] *= L2_source_temp; Store<T_STORE>( source_temp + x, (ValueT)(Load<T_LOAD>(source_temp + x) * L2_source_temp)); } // finished L-2 norm for source temp ////////////////////////////////////////////////////////////////////////////////////// // get h_B2^2 k =2. 
offset = i * result_column; double L2_source_result = 0.0; for (int x = 0; x < Wf2_dim1; x++) { ValueT val = 0; // source_result[offset + x]; // printf ("source_r1_0:%f", source_result[idx_0] ); for (int y = 0; y < Wf2_dim0; y++) val += Load<T_LOAD>(source_temp + y) // source_temp[y] * Load<W_LOAD>(W_f_2 + (y * Wf2_dim1 + x)); if (val < 0) val = 0; // relu() L2_source_result += val * val; Store<T_STORE>(source_result + (offset + x), val); // printf ("source_r1:%f", source_result[idx_0] ); } // got 1st half of h_B2^2 for (int x = 0; x < Wa2_dim1; x++) { // printf ("source_r2_0:%f", source_result[idx_0] ); ValueT val = 0; // source_result[offset + x]; for (int y = 0; y < Wa2_dim0; y++) val += Load<cub::LOAD_LDG>(children_temp + i * Wa2_dim0 + y) // children_temp[i * Wa2_dim0 + y] * Load<W_LOAD>(W_a_2 + (y * Wa2_dim1 + x)); if (val < 0) val = 0; // relu() L2_source_result += val * val; Store<T_STORE>(source_result + (offset + Wf2_dim1 + x), val); } // got 2nd half of h_B2^2 // for (int x =0; x < result_column; x ++ ) //{ // ValueT val = source_result[offset + x]; // if (val < 0) // relu() // val = 0; // L2_source_result += val * val; // source_result[offset + x] = val; //} //finished relu L2_source_result = 1.0 / sqrt(L2_source_result); for (int x = 0; x < result_column; x++) { // source_result[offset + x] *= L2_source_result; Store<cub::STORE_WT>( source_result + (offset + x), (ValueT)(Load<T_LOAD>(source_result + (offset + x)) * L2_source_result)); // printf ("source_r:%f", source_result[idx_0] ); // printf ("ch_t:%f", children_temp[idx_0]); } // finished L-2 norm for source result }, num_sources, util::DEVICE, stream, 640)); } // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); GUARD_CU2(cudaEventRecord(data_slice.d2h_start, stream), "cudaEventRecord failed."); GUARD_CU2( cudaStreamWaitEvent(data_slice.d2h_stream, data_slice.d2h_start, 0), "cudaStreamWaitEvent failed."); GUARD_CU2(cudaMemcpyAsync( data_slice.host_source_result + 
(((uint64_t)source_start) * result_column), source_result.GetPointer(util::DEVICE), ((uint64_t)num_sources) * result_column * sizeof(ValueT), cudaMemcpyDeviceToHost, data_slice.d2h_stream), "source_result D2H copy failed"); GUARD_CU2(cudaEventRecord(data_slice.d2h_finish, data_slice.d2h_stream), "cudaEventRecord failed."); // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); return retval; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; // auto iteration = enactor_slice.enactor_stats.iteration; // auto &distances = data_slice.distances; // auto &labels = data_slice.labels; // auto &preds = data_slice.preds; // auto label = this -> enactor -> // mgpu_slices[this -> gpu_num].in_iteration[iteration % 2][peer_]; auto expand_op = [] __host__ __device__( VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { /* ValueT in_val = value__associate_ins[in_pos]; ValueT old_val = atomicMin(distances + key, in_val); if (old_val <= in_val) return false; if (labels[key] == label) return false; labels[key] = label; if (!preds.isEmpty()) preds[key] = vertex_associate_ins[in_pos]; */ return true; }; cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, 
peer_, expand_op); return retval; } bool Stop_Condition(int gpu_num = 0) { int num_gpus = this->enactor->num_gpus; auto &enactor_slices = this->enactor->enactor_slices; for (int gpu = 0; gpu < num_gpus * num_gpus; gpu++) { auto &retval = enactor_slices[gpu].enactor_stats.retval; if (retval == cudaSuccess) continue; printf("(CUDA error %d @ GPU %d: %s\n", retval, gpu % num_gpus, cudaGetErrorString(retval)); fflush(stdout); return true; } auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor->enactor_slices[this->gpu_num * this->enactor->num_gpus]; // util::PrintMsg("iter = " + // std::to_string(enactor_slice.enactor_stats.iteration) // + ", batch_size = " + std::to_string(data_slice.batch_size) // + ", nodes = " + std::to_string(data_slice.sub_graph -> nodes)); if (enactor_slice.enactor_stats.iteration * data_slice.batch_size < data_slice.sub_graph->nodes) return false; return true; } cudaError_t Compute_OutputLength(int peer_) { return cudaSuccess; } cudaError_t Check_Queue_Size(int peer_) { return cudaSuccess; } }; // end of SSSPIteration /** * @brief SSSP enactor class. 
* @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase<typename _Problem::GraphT, typename _Problem::VertexT, typename _Problem::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: // Definations typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::ValueT ValueT; typedef typename Problem::GraphT GraphT; typedef typename Problem::LabelT LabelT; typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef SAGEIterationLoop<EnactorT> IterationT; // Members Problem *problem; IterationT *iterations; /** * \addtogroup PublicInterface * @{ */ /** * @brief SSSPEnactor constructor */ Enactor() : BaseEnactor("sage"), problem(NULL) { this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; } /** * @brief SSSPEnactor destructor */ virtual ~Enactor() { // Release(); } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * @brief Initialize the enactor. * @param[in] problem The problem object. 
* @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 0, NULL, target, false)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0]; auto &graph = problem.sub_graphs[gpu]; GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges, this->queue_factors)); } iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief Reset enactor * @param[in] src Source node to start primitive. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) { typedef typename GraphT::GpT GpT; cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { if ((this->num_gpus == 1) || (gpu == this->problem->org_graph->GpT::partition_table[src])) { this->thread_slices[gpu].init_size = 1; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { auto &frontier = this->enactor_slices[gpu * this->num_gpus + peer_].frontier; frontier.queue_length = (peer_ == 0) ? 
1 : 0; // if (peer_ == 0) //{ // GUARD_CU(frontier.V_Q() -> ForEach( // [src]__host__ __device__ (VertexT &v) // // v = src; // } //} } } // else { // this -> thread_slices[gpu].init_size = 0; // for (int peer_ = 0; peer_ < this -> num_gpus; peer_++) // { // this -> enactor_slices[gpu * this -> num_gpus + peer_] // .frontier.queue_length = 0; // } // } } GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief one run of sage, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop<0, 1, IterationT>( thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Enacts a SSSP computing on the specified graph. * @param[in] src Source node to start primitive. * \return cudaError_t error message(s), if any */ cudaError_t Enact() { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU SAGE Done.", this->flag & Debug); return retval; } /** @} */ }; } // namespace sage } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
the_stack
#include <algorithm>

#include "cupoch/geometry/image.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
#include "cupoch/visualization/shader/image_shader.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/utility/color_map.h"

using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;

namespace {

// Clamp a float color to [0, 1] and scale to an 8-bit channel value.
// NaN maps to 0.
__device__ uint8_t ConvertColorFromFloatToUnsignedChar(float color) {
    if (isnan(color)) {
        return 0;
    } else {
        thrust::minimum<float> min;
        thrust::maximum<float> max;
        float unified_color = min(1.0f, max(0.0f, color));
        return (uint8_t)(unified_color * 255.0f);
    }
}

// Expand a 1-channel float image into 3-channel uint8 RGB (gray replicated).
// k indexes the output RGB byte stream; every 3 outputs share one input pixel.
struct copy_float_gray_image_functor {
    copy_float_gray_image_functor(const uint8_t *gray) : gray_(gray){};
    const uint8_t *gray_;
    __device__ uint8_t operator()(size_t k) const {
        int idx = k / 3;  // source pixel index (4 bytes per float pixel)
        float *p = (float *)(gray_ + idx * 4);
        uint8_t color = ConvertColorFromFloatToUnsignedChar(*p);
        return color;
    }
};

// Convert a 3-channel float image to 3-channel uint8, channel by channel.
struct copy_float_rgb_image_functor {
    copy_float_rgb_image_functor(const uint8_t *rgb) : rgb_(rgb){};
    const uint8_t *rgb_;
    __device__ uint8_t operator()(size_t idx) const {
        float *p = (float *)(rgb_ + idx * 4);
        return ConvertColorFromFloatToUnsignedChar(*p);
    }
};

// Convert a 3-channel 16-bit image to uint8 by keeping the low byte.
struct copy_int16_rgb_image_functor {
    copy_int16_rgb_image_functor(const uint8_t *rgb) : rgb_(rgb){};
    const uint8_t *rgb_;
    __device__ uint8_t operator()(size_t idx) const {
        uint16_t *p = (uint16_t *)(rgb_ + idx * 2);
        return (uint8_t)((*p) & 0xff);
    }
};

// Colorize a 1-channel 16-bit depth image into uint8 RGB via the global
// color map. Depth is normalized by max_depth and clamped to [0, 1].
struct copy_depth_image_functor {
    copy_depth_image_functor(const uint8_t *depth, int max_depth)
        : depth_(depth), max_depth_(max_depth){};
    const uint8_t *depth_;
    const int max_depth_;
    const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
    __device__ uint8_t operator()(size_t k) const {
        thrust::minimum<float> min;
        int i = k / 3;  // source depth pixel
        int j = k % 3;  // output channel (R, G, B)
        uint16_t *p = (uint16_t *)(depth_ + i * 2);
        // 1.0f (not 1.0): keep the computation in float on the device.
        float depth = min(float(*p) / float(max_depth_), 1.0f);
        Eigen::Vector3f color = GetColorMapColor(depth, colormap_option_);
        return (uint8_t)(color(j) * 255);
    }
};

}  // unnamed namespace

// Compile the image vertex/fragment shader pair and resolve attribute and
// uniform locations. Returns false (with a warning) on compile failure.
bool ImageShader::Compile() {
    if (CompileShaders(image_vertex_shader, NULL, image_fragment_shader) ==
        false) {
        PrintShaderWarning("Compiling shaders failed.");
        return false;
    }
    vertex_position_ = glGetAttribLocation(program_, "vertex_position");
    vertex_UV_ = glGetAttribLocation(program_, "vertex_UV");
    image_texture_ = glGetUniformLocation(program_, "image_texture");
    vertex_scale_ = glGetUniformLocation(program_, "vertex_scale");
    return true;
}

// Release GL buffers and the shader program. finalize=true skips CUDA
// unregistration (the CUDA context may already be gone at teardown).
void ImageShader::Release() {
    UnbindGeometry(true);
    ReleaseProgram();
}

// Upload quad geometry, create the texture and pixel buffer, register the
// pixel buffer with CUDA, and let PrepareBinding fill it on the device.
bool ImageShader::BindGeometry(const geometry::Geometry &geometry,
                               const RenderOption &option,
                               const ViewControl &view) {
    // If there is already geometry, we first unbind it.
    // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
    // rebind the geometry. Note that this approach is slow. If the geometry is
    // changing per frame, consider implementing a new ShaderWrapper using
    // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object
    // Streaming mechanisms.
    UnbindGeometry();

    // Prepare data to be passed to GPU
    const size_t num_data_height = GetDataHeight(geometry);
    const size_t num_data_width = GetDataWidth(geometry);

    // Create buffers and bind the geometry: a full-screen quad (two
    // triangles) with matching texture coordinates.
    const GLfloat vertex_position_buffer_data[18] = {
            -1.0f, -1.0f, 0.0f, 1.0f,  -1.0f, 0.0f, 1.0f,  1.0f, 0.0f,
            -1.0f, -1.0f, 0.0f, 1.0f,  1.0f,  0.0f, -1.0f, 1.0f, 0.0f,
    };
    const GLfloat vertex_UV_buffer_data[12] = {
            0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
            0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f,
    };
    glGenBuffers(1, &vertex_position_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_position_buffer_data),
                 vertex_position_buffer_data, GL_STATIC_DRAW);
    glGenBuffers(1, &vertex_UV_buffer_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_UV_buffer_data),
                 vertex_UV_buffer_data, GL_STATIC_DRAW);

    glGenTextures(1, &image_texture_buffer_);
    glBindTexture(GL_TEXTURE_2D, image_texture_buffer_);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, num_data_width, num_data_height, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, 0);
    if (option.interpolation_option_ ==
        RenderOption::TextureInterpolationOption::Nearest) {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    } else {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                        GL_LINEAR_MIPMAP_LINEAR);
        glGenerateMipmap(GL_TEXTURE_2D);
    }

    // Pixel unpack buffer shared with CUDA: the device writes the converted
    // RGB image directly into it, avoiding a host round-trip.
    glGenBuffers(1, &image_pixel_buffer_);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_);
    size_t data_size = GetDataSize(geometry);
    glBufferData(GL_PIXEL_UNPACK_BUFFER, data_size, 0, GL_STATIC_DRAW);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0],
                                              image_pixel_buffer_,
                                              cudaGraphicsMapFlagsNone));
    uint8_t *raw_render_image_ptr;
    size_t n_bytes;
    cudaSafeCall(cudaGraphicsMapResources(1, cuda_graphics_resources_));
    cudaSafeCall(cudaGraphicsResourceGetMappedPointer(
            (void **)&raw_render_image_ptr, &n_bytes,
            cuda_graphics_resources_[0]));
    thrust::device_ptr<uint8_t> dev_render_image_ptr =
            thrust::device_pointer_cast(raw_render_image_ptr);
    if (PrepareBinding(geometry, option, view, dev_render_image_ptr) == false) {
        PrintShaderWarning("Binding failed when preparing data.");
        // Fix: the resource mapped above must be unmapped even on failure,
        // otherwise it stays mapped forever and every later map fails.
        Unmap(1);
        return false;
    }
    Unmap(1);
    bound_ = true;
    return true;
}

// Draw the bound image quad: refresh the texture from the pixel buffer,
// then render two textured triangles.
bool ImageShader::RenderGeometry(const geometry::Geometry &geometry,
                                 const RenderOption &option,
                                 const ViewControl &view) {
    if (PrepareRendering(geometry, option, view) == false) {
        PrintShaderWarning("Rendering failed during preparation.");
        return false;
    }
    const size_t num_data_height = GetDataHeight(geometry);
    const size_t num_data_width = GetDataWidth(geometry);
    glUseProgram(program_);
    glUniform3fv(vertex_scale_, 1, vertex_scale_data_.data());
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, image_texture_buffer_);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, image_pixel_buffer_);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height,
                    GL_RGB, GL_UNSIGNED_BYTE, 0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    glUniform1i(image_texture_, 0);
    glEnableVertexAttribArray(vertex_position_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(vertex_UV_);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_UV_buffer_);
    glVertexAttribPointer(vertex_UV_, 2, GL_FLOAT, GL_FALSE, 0, NULL);
    glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
    glDisableVertexAttribArray(vertex_position_);
    glDisableVertexAttribArray(vertex_UV_);
    return true;
}

// Delete GL buffers/texture and (unless finalizing) unregister the CUDA
// graphics resource. No-op when nothing is bound.
void ImageShader::UnbindGeometry(bool finalize) {
    if (bound_) {
        if (!finalize)
            cudaSafeCall(cudaGraphicsUnregisterResource(
                    cuda_graphics_resources_[0]));
        glDeleteBuffers(1, &image_pixel_buffer_);
        glDeleteBuffers(1, &vertex_position_buffer_);
        glDeleteBuffers(1, &vertex_UV_buffer_);
        glDeleteTextures(1, &image_texture_buffer_);
        bound_ = false;
    }
}

// Compute the vertex scale for the chosen stretch mode and disable depth
// testing (the image is a flat overlay).
bool ImageShaderForImage::PrepareRendering(const geometry::Geometry &geometry,
                                           const RenderOption &option,
                                           const ViewControl &view) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::Image) {
        PrintShaderWarning("Rendering type is not geometry::Image.");
        return false;
    }
    const geometry::Image &image = (const geometry::Image &)geometry;
    GLfloat ratio_x, ratio_y;
    switch (option.image_stretch_option_) {
        case RenderOption::ImageStretchOption::StretchKeepRatio:
            // Fit the window while preserving aspect ratio: scale the
            // larger ratio to 1 and shrink the other accordingly.
            ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth());
            ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight());
            if (ratio_x < ratio_y) {
                ratio_x /= ratio_y;
                ratio_y = 1.0f;
            } else {
                ratio_y /= ratio_x;
                ratio_x = 1.0f;
            }
            break;
        case RenderOption::ImageStretchOption::StretchWithWindow:
            ratio_x = 1.0f;
            ratio_y = 1.0f;
            break;
        case RenderOption::ImageStretchOption::OriginalSize:
        default:
            ratio_x = GLfloat(image.width_) / GLfloat(view.GetWindowWidth());
            ratio_y = GLfloat(image.height_) / GLfloat(view.GetWindowHeight());
            break;
    }
    vertex_scale_data_(0) = ratio_x;
    vertex_scale_data_(1) = ratio_y;
    vertex_scale_data_(2) = 1.0f;
    glDisable(GL_DEPTH_TEST);
    return true;
}

// Convert the image into the 3-channel uint8 render buffer on the device.
// Supported layouts: uint8 RGB (copied), uint8 gray (replicated), float
// gray/RGB (clamped and scaled), 16-bit RGB (low byte), 16-bit depth
// (colorized via the global color map).
bool ImageShaderForImage::PrepareBinding(
        const geometry::Geometry &geometry,
        const RenderOption &option,
        const ViewControl &view,
        thrust::device_ptr<uint8_t> &render_image) {
    if (geometry.GetGeometryType() !=
        geometry::Geometry::GeometryType::Image) {
        PrintShaderWarning("Rendering type is not geometry::Image.");
        return false;
    }
    const geometry::Image &image = (const geometry::Image &)geometry;
    if (image.HasData() == false) {
        PrintShaderWarning("Binding failed with empty image.");
        return false;
    }
    if (image.num_of_channels_ == 3 && image.bytes_per_channel_ == 1) {
        thrust::copy(image.data_.begin(), image.data_.end(), render_image);
    } else {
        if (image.num_of_channels_ == 1 && image.bytes_per_channel_ == 1) {
            // grayscale image
            thrust::repeated_range<
                    utility::device_vector<uint8_t>::const_iterator>
                    range(image.data_.begin(), image.data_.end(), 3);
            thrust::copy(range.begin(), range.end(), render_image);
        } else if (image.num_of_channels_ == 1 &&
                   image.bytes_per_channel_ == 4) {
            // grayscale image with floating point per channel
            copy_float_gray_image_functor func(
                    thrust::raw_pointer_cast(image.data_.data()));
            thrust::transform(thrust::make_counting_iterator<size_t>(0),
                              thrust::make_counting_iterator<size_t>(
                                      image.height_ * image.width_ * 3),
                              render_image, func);
        } else if (image.num_of_channels_ == 3 &&
                   image.bytes_per_channel_ == 4) {
            // RGB image with floating point per channel
            copy_float_rgb_image_functor func(
                    thrust::raw_pointer_cast(image.data_.data()));
            thrust::transform(thrust::make_counting_iterator<size_t>(0),
                              thrust::make_counting_iterator<size_t>(
                                      image.height_ * image.width_ * 3),
                              render_image, func);
        } else if (image.num_of_channels_ == 3 &&
                   image.bytes_per_channel_ == 2) {
            // image with RGB channels, each channel is a 16-bit integer
            copy_int16_rgb_image_functor func(
                    thrust::raw_pointer_cast(image.data_.data()));
            thrust::transform(thrust::make_counting_iterator<size_t>(0),
                              thrust::make_counting_iterator<size_t>(
                                      image.height_ * image.width_ * 3),
                              render_image, func);
        } else if (image.num_of_channels_ == 1 &&
                   image.bytes_per_channel_ == 2) {
            // depth image, one channel of 16-bit integer
            const int max_depth = option.image_max_depth_;
            copy_depth_image_functor func(
                    thrust::raw_pointer_cast(image.data_.data()), max_depth);
            thrust::transform(thrust::make_counting_iterator<size_t>(0),
                              thrust::make_counting_iterator<size_t>(
                                      image.height_ * image.width_ * 3),
                              render_image, func);
        }
        // NOTE(review): any other channel/byte combination falls through
        // without writing render_image yet still returns true, so the
        // texture keeps stale contents — consider warning + returning false.
    }
    draw_arrays_mode_ = GL_TRIANGLES;
    draw_arrays_size_ = 6;
    return true;
}

// Size in bytes of the 3-channel uint8 render buffer for this image.
size_t ImageShaderForImage::GetDataSize(
        const geometry::Geometry &geometry) const {
    return GetDataWidth(geometry) * GetDataHeight(geometry) * 3;
}

size_t ImageShaderForImage::GetDataHeight(
        const geometry::Geometry &geometry) const {
    return ((const geometry::Image &)geometry).height_;
}

size_t ImageShaderForImage::GetDataWidth(
        const geometry::Geometry &geometry) const {
    return ((const geometry::Image &)geometry).width_;
}
the_stack
// hipify-clang lit test: converts this cuSPARSE pruneCsr2csr sample to HIP
// and verifies the output against the `// CHECK:` FileCheck annotations.
// Do not alter the code statements — they are the conversion fixture.
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda_runtime.h>
// CHECK: #include <hipsparse.h>
#include <cusparse.h>

// Print a CSR matrix (any index base) in 1-based (row, col) = value form.
void printCsr(
    int m,
    int n,
    int nnz,
    // CHECK: const hipsparseMatDescr_t descrA,
    const cusparseMatDescr_t descrA,
    const float *csrValA,
    const int *csrRowPtrA,
    const int *csrColIndA,
    const char* name)
{
    // CHECK: const int base = (hipsparseGetMatIndexBase(descrA) != HIPSPARSE_INDEX_BASE_ONE) ? 0 : 1;
    const int base = (cusparseGetMatIndexBase(descrA) != CUSPARSE_INDEX_BASE_ONE) ? 0 : 1;
    printf("matrix %s is %d-by-%d, nnz=%d, base=%d, output base-1\n", name, m, n, nnz, base);
    for (int row = 0; row < m; row++) {
        const int start = csrRowPtrA[row] - base;
        const int end = csrRowPtrA[row + 1] - base;
        for (int colidx = start; colidx < end; colidx++) {
            const int col = csrColIndA[colidx] - base;
            const float Areg = csrValA[colidx];
            printf("%s(%d,%d) = %f\n", name, row + 1, col + 1, Areg);
        }
    }
}

int main(int argc, char*argv[]) {
    // CHECK: hipsparseHandle_t handle = NULL;
    cusparseHandle_t handle = NULL;
    // CHECK: hipStream_t stream = NULL;
    cudaStream_t stream = NULL;
    // CHECK: hipsparseMatDescr_t descrA = NULL;
    cusparseMatDescr_t descrA = NULL;
    // CHECK: hipsparseMatDescr_t descrC = NULL;
    cusparseMatDescr_t descrC = NULL;
    // CHECK: hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS;
    cusparseStatus_t status = CUSPARSE_STATUS_SUCCESS;
    // CHECK: hipError_t cudaStat1 = hipSuccess;
    cudaError_t cudaStat1 = cudaSuccess;
    const int m = 4;
    const int n = 4;
    const int nnzA = 9;
/*
 *      |    1     0     2     -3  |
 *      |    0     4     0     0   |
 *  A = |    5     0     6     7   |
 *      |    0     8     0     9   |
 *
 */
    const int csrRowPtrA[m + 1] = { 1, 4, 5, 8, 10 };
    const int csrColIndA[nnzA] = { 1, 3, 4, 2, 1, 3, 4, 2, 4 };
    const float csrValA[nnzA] = { 1, 2, -3, 4, 5, 6, 7, 8, 9 };
    int* csrRowPtrC = NULL;
    int* csrColIndC = NULL;
    float* csrValC = NULL;
    int *d_csrRowPtrA = NULL;
    int *d_csrColIndA = NULL;
    float *d_csrValA = NULL;
    int *d_csrRowPtrC = NULL;
    int *d_csrColIndC = NULL;
    float *d_csrValC = NULL;
    size_t lworkInBytes = 0;
    char *d_work = NULL;
    int nnzC = 0;
    float threshold = 4.1; /* remove Aij <= 4.1 */
    // float threshold = 0; /* remove zeros */
    printf("example of pruneCsr2csr \n");
    printf("prune |A(i,j)| <= threshold \n");
    printf("threshold = %E \n", threshold);
/* step 1: create cusparse handle, bind a stream */
    // CHECK: cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
    cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: status = hipsparseCreate(&handle);
    status = cusparseCreate(&handle);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
    // CHECK: status = hipsparseSetStream(handle, stream);
    status = cusparseSetStream(handle, stream);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
/* step 2: configuration of matrix A and C */
    // CHECK: status = hipsparseCreateMatDescr(&descrA);
    status = cusparseCreateMatDescr(&descrA);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
/* A is base-1*/
    // CHECK: hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
    // CHECK: hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    // CHECK: status = hipsparseCreateMatDescr(&descrC);
    status = cusparseCreateMatDescr(&descrC);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
/* C is base-0 */
    // CHECK: hipsparseSetMatIndexBase(descrC, HIPSPARSE_INDEX_BASE_ZERO);
    cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ZERO);
    // CHECK: hipsparseSetMatType(descrC, HIPSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL);
    printCsr(m, n, nnzA, descrA, csrValA, csrRowPtrA, csrColIndA, "A");
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrRowPtrA, sizeof(int)*(m + 1));
    cudaStat1 = cudaMalloc((void**)&d_csrRowPtrA, sizeof(int)*(m + 1));
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrColIndA, sizeof(int)*nnzA);
    cudaStat1 = cudaMalloc((void**)&d_csrColIndA, sizeof(int)*nnzA);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrValA, sizeof(float)*nnzA);
    cudaStat1 = cudaMalloc((void**)&d_csrValA, sizeof(float)*nnzA);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrRowPtrC, sizeof(int)*(m + 1));
    cudaStat1 = cudaMalloc((void**)&d_csrRowPtrC, sizeof(int)*(m + 1));
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int)*(m + 1), hipMemcpyHostToDevice);
    cudaStat1 = cudaMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int)*(m + 1), cudaMemcpyHostToDevice);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMemcpy(d_csrColIndA, csrColIndA, sizeof(int)*nnzA, hipMemcpyHostToDevice);
    cudaStat1 = cudaMemcpy(d_csrColIndA, csrColIndA, sizeof(int)*nnzA, cudaMemcpyHostToDevice);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMemcpy(d_csrValA, csrValA, sizeof(float)*nnzA, hipMemcpyHostToDevice);
    cudaStat1 = cudaMemcpy(d_csrValA, csrValA, sizeof(float)*nnzA, cudaMemcpyHostToDevice);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
/* step 3: query workspace */
    // TODO: status = hipsparseSpruneCsr2csr_bufferSizeExt(
    status = cusparseSpruneCsr2csr_bufferSizeExt(
        handle,
        m,
        n,
        nnzA,
        descrA,
        d_csrValA,
        d_csrRowPtrA,
        d_csrColIndA,
        &threshold,
        descrC,
        d_csrValC,
        d_csrRowPtrC,
        d_csrColIndC,
        &lworkInBytes);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
    printf("lworkInBytes (prune) = %lld \n", (long long)lworkInBytes);
    // CHECK: if (NULL != d_work) { hipFree(d_work); }
    if (NULL != d_work) { cudaFree(d_work); }
    // Fix: this line previously lacked the "CHECK:" prefix, so the hipMalloc
    // conversion of the workspace allocation was silently never verified.
    // CHECK: cudaStat1 = hipMalloc((void**)&d_work, lworkInBytes);
    cudaStat1 = cudaMalloc((void**)&d_work, lworkInBytes);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
/* step 4: compute csrRowPtrC and nnzC */
    // TODO: status = hipsparseSpruneCsr2csrNnz(
    status = cusparseSpruneCsr2csrNnz(
        handle,
        m,
        n,
        nnzA,
        descrA,
        d_csrValA,
        d_csrRowPtrA,
        d_csrColIndA,
        &threshold,
        descrC,
        d_csrRowPtrC,
        &nnzC, /* host */
        d_work);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
    // CHECK: cudaStat1 = hipDeviceSynchronize();
    cudaStat1 = cudaDeviceSynchronize();
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    printf("nnzC = %d\n", nnzC);
    if (0 == nnzC) {
        printf("C is empty \n");
        return 0;
    }
/* step 5: compute csrColIndC and csrValC */
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrColIndC, sizeof(int) * nnzC);
    cudaStat1 = cudaMalloc((void**)&d_csrColIndC, sizeof(int) * nnzC);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMalloc((void**)&d_csrValC, sizeof(float) * nnzC);
    cudaStat1 = cudaMalloc((void**)&d_csrValC, sizeof(float) * nnzC);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // TODO: status = hipsparseSpruneCsr2csr(
    status = cusparseSpruneCsr2csr(
        handle,
        m,
        n,
        nnzA,
        descrA,
        d_csrValA,
        d_csrRowPtrA,
        d_csrColIndA,
        &threshold,
        descrC,
        d_csrValC,
        d_csrRowPtrC,
        d_csrColIndC,
        d_work);
    // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status);
    assert(CUSPARSE_STATUS_SUCCESS == status);
    // CHECK: cudaStat1 = hipDeviceSynchronize();
    cudaStat1 = cudaDeviceSynchronize();
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
/* step 6: output C */
    csrRowPtrC = (int*)malloc(sizeof(int)*(m + 1));
    csrColIndC = (int*)malloc(sizeof(int)*nnzC);
    csrValC = (float*)malloc(sizeof(float)*nnzC);
    assert(NULL != csrRowPtrC);
    assert(NULL != csrColIndC);
    assert(NULL != csrValC);
    // CHECK: cudaStat1 = hipMemcpy(csrRowPtrC, d_csrRowPtrC, sizeof(int)*(m + 1), hipMemcpyDeviceToHost);
    cudaStat1 = cudaMemcpy(csrRowPtrC, d_csrRowPtrC, sizeof(int)*(m + 1), cudaMemcpyDeviceToHost);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMemcpy(csrColIndC, d_csrColIndC, sizeof(int)*nnzC, hipMemcpyDeviceToHost);
    cudaStat1 = cudaMemcpy(csrColIndC, d_csrColIndC, sizeof(int)*nnzC, cudaMemcpyDeviceToHost);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    // CHECK: cudaStat1 = hipMemcpy(csrValC, d_csrValC, sizeof(float)*nnzC, hipMemcpyDeviceToHost);
    cudaStat1 = cudaMemcpy(csrValC, d_csrValC, sizeof(float)*nnzC, cudaMemcpyDeviceToHost);
    // CHECK: assert(hipSuccess == cudaStat1);
    assert(cudaSuccess == cudaStat1);
    printCsr(m, n, nnzC, descrC, csrValC, csrRowPtrC, csrColIndC, "C");
/* free resources */
    // CHECK: if (d_csrRowPtrA) hipFree(d_csrRowPtrA);
    if (d_csrRowPtrA) cudaFree(d_csrRowPtrA);
    // CHECK: if (d_csrColIndA) hipFree(d_csrColIndA);
    if (d_csrColIndA) cudaFree(d_csrColIndA);
    // CHECK: if (d_csrValA) hipFree(d_csrValA);
    if (d_csrValA) cudaFree(d_csrValA);
    // CHECK: if (d_csrRowPtrC) hipFree(d_csrRowPtrC);
    if (d_csrRowPtrC) cudaFree(d_csrRowPtrC);
    // CHECK: if (d_csrColIndC) hipFree(d_csrColIndC);
    if (d_csrColIndC) cudaFree(d_csrColIndC);
    // CHECK: if (d_csrValC) hipFree(d_csrValC);
    if (d_csrValC) cudaFree(d_csrValC);
    if (csrRowPtrC) free(csrRowPtrC);
    if (csrColIndC) free(csrColIndC);
    if (csrValC) free(csrValC);
    // CHECK: if (handle) hipsparseDestroy(handle);
    if (handle) cusparseDestroy(handle);
    // CHECK: if (stream) hipStreamDestroy(stream);
    if (stream) cudaStreamDestroy(stream);
    // CHECK: if (descrA) hipsparseDestroyMatDescr(descrA);
    if (descrA) cusparseDestroyMatDescr(descrA);
    // CHECK: if (descrC) hipsparseDestroyMatDescr(descrC);
    if (descrC) cusparseDestroyMatDescr(descrC);
    // CHECK: hipDeviceReset();
    cudaDeviceReset();
    return 0;
}
the_stack
#include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/iterator_utilities.hpp> #include <cudf_test/tdigest_utilities.cuh> #include <cudf_test/type_lists.hpp> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> namespace cudf { namespace test { using namespace cudf; /** * @brief Functor to generate a tdigest by key. * */ struct tdigest_gen_grouped { template < typename T, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& keys, column_view const& values, int delta) { cudf::table_view t({keys}); cudf::groupby::groupby gb(t); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta)); requests.push_back({values, std::move(aggregations)}); auto result = gb.aggregate(requests); return std::move(result.second[0].results[0]); } template < typename T, typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& keys, column_view const& values, int delta) { CUDF_FAIL("Invalid tdigest test type"); } }; /** * @brief Functor for generating a tdigest using groupby with a constant key. * */ struct tdigest_groupby_simple_op { std::unique_ptr<column> operator()(column_view const& values, int delta) const { // make a simple set of matching keys. 
auto keys = cudf::make_fixed_width_column( data_type{type_id::INT32}, values.size(), mask_state::UNALLOCATED); thrust::fill(rmm::exec_policy(rmm::cuda_stream_default), keys->mutable_view().template begin<int>(), keys->mutable_view().template end<int>(), 0); cudf::table_view t({*keys}); cudf::groupby::groupby gb(t); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta)); requests.push_back({values, std::move(aggregations)}); auto result = gb.aggregate(requests); return std::move(result.second[0].results[0]); } }; /** * @brief Functor for merging tdigests using groupby with a constant key. * */ struct tdigest_groupby_simple_merge_op { std::unique_ptr<column> operator()(column_view const& merge_values, int merge_delta) const { // make a simple set of matching keys. auto merge_keys = cudf::make_fixed_width_column( data_type{type_id::INT32}, merge_values.size(), mask_state::UNALLOCATED); thrust::fill(rmm::exec_policy(rmm::cuda_stream_default), merge_keys->mutable_view().template begin<int>(), merge_keys->mutable_view().template end<int>(), 0); cudf::table_view key_table({*merge_keys}); cudf::groupby::groupby gb(key_table); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back( cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(merge_delta)); requests.push_back({merge_values, std::move(aggregations)}); auto result = gb.aggregate(requests); return std::move(result.second[0].results[0]); } }; template <typename T> struct TDigestAllTypes : public cudf::test::BaseFixture { }; TYPED_TEST_SUITE(TDigestAllTypes, cudf::test::NumericTypes); TYPED_TEST(TDigestAllTypes, Simple) { using T = TypeParam; tdigest_simple_aggregation<T>(tdigest_groupby_simple_op{}); } TYPED_TEST(TDigestAllTypes, SimpleWithNulls) { using T 
= TypeParam; tdigest_simple_with_nulls_aggregation<T>(tdigest_groupby_simple_op{}); } TYPED_TEST(TDigestAllTypes, AllNull) { using T = TypeParam; tdigest_simple_all_nulls_aggregation<T>(tdigest_groupby_simple_op{}); } TYPED_TEST(TDigestAllTypes, LargeGroups) { auto _values = generate_standardized_percentile_distribution(data_type{type_id::FLOAT64}); int const delta = 1000; // generate a random set of keys std::vector<int> h_keys; h_keys.reserve(_values->size()); auto iter = thrust::make_counting_iterator(0); std::transform(iter, iter + _values->size(), std::back_inserter(h_keys), [](int i) { return static_cast<int>(round(rand_range(0, 8))); }); cudf::test::fixed_width_column_wrapper<int> _keys(h_keys.begin(), h_keys.end()); // group the input values together cudf::table_view k({_keys}); cudf::groupby::groupby setup_gb(k); cudf::table_view v({*_values}); auto groups = setup_gb.get_groups(v); // slice it all up so we have keys/columns for everything. std::vector<column_view> keys; std::vector<column_view> values; for (size_t idx = 0; idx < groups.offsets.size() - 1; idx++) { auto k = cudf::slice(groups.keys->get_column(0), {groups.offsets[idx], groups.offsets[idx + 1]}); keys.push_back(k[0]); auto v = cudf::slice(groups.values->get_column(0), {groups.offsets[idx], groups.offsets[idx + 1]}); values.push_back(v[0]); } // generate a seperate tdigest for each group std::vector<std::unique_ptr<column>> parts; std::transform( iter, iter + values.size(), std::back_inserter(parts), [&keys, &values, delta](int i) { cudf::table_view t({keys[i]}); cudf::groupby::groupby gb(t); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta)); requests.push_back({values[i], std::move(aggregations)}); auto result = gb.aggregate(requests); return std::move(result.second[0].results[0]); }); std::vector<column_view> part_views; 
std::transform(parts.begin(), parts.end(), std::back_inserter(part_views), [](std::unique_ptr<column> const& col) { return col->view(); }); auto merged_parts = cudf::concatenate(part_views); // generate a tdigest on the whole input set cudf::table_view t({_keys}); cudf::groupby::groupby gb(t); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta)); requests.push_back({*_values, std::move(aggregations)}); auto result = gb.aggregate(requests); // verify that they end up the same. CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*result.second[0].results[0], *merged_parts); } struct TDigestTest : public cudf::test::BaseFixture { }; TEST_F(TDigestTest, EmptyMixed) { cudf::test::fixed_width_column_wrapper<double> values{ {123456.78, 10.0, 20.0, 25.0, 30.0, 40.0, 50.0, 60.0, 70.0}, {1, 0, 0, 1, 0, 0, 1, 1, 0}}; cudf::test::strings_column_wrapper keys{"b", "a", "c", "c", "d", "d", "e", "e", "f"}; auto const delta = 1000; cudf::table_view t({keys}); cudf::groupby::groupby gb(t); std::vector<cudf::groupby::aggregation_request> requests; std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations; aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta)); requests.push_back({values, std::move(aggregations)}); auto result = gb.aggregate(requests); using FCW = cudf::test::fixed_width_column_wrapper<double>; auto expected = make_expected_tdigest_column({{FCW{}, FCW{}, 0, 0}, {FCW{123456.78}, FCW{1.0}, 123456.78, 123456.78}, {FCW{25.0}, FCW{1.0}, 25.0, 25.0}, {FCW{}, FCW{}, 0, 0}, {FCW{50.0, 60.0}, FCW{1.0, 1.0}, 50.0, 60.0}, {FCW{}, FCW{}, 0, 0}}); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result.second[0].results[0], *expected); } TEST_F(TDigestTest, LargeInputDouble) { tdigest_simple_large_input_double_aggregation(tdigest_groupby_simple_op{}); } TEST_F(TDigestTest, LargeInputInt) { 
tdigest_simple_large_input_int_aggregation(tdigest_groupby_simple_op{});
}

TEST_F(TDigestTest, LargeInputDecimal)
{
  tdigest_simple_large_input_decimal_aggregation(tdigest_groupby_simple_op{});
}

// Fixture for tests that exercise merging of previously-built tdigests.
struct TDigestMergeTest : public cudf::test::BaseFixture {
};

// Note: there is no need to test different types here as the internals of a tdigest are always
// the same regardless of input.
TEST_F(TDigestMergeTest, Simple)
{
  tdigest_merge_simple(tdigest_groupby_simple_op{}, tdigest_groupby_simple_merge_op{});
}

// Device functor mapping row index -> group key: rows [0, 250000) go to group 0,
// everything else to group 1.
struct key_groups {
  __device__ size_type operator()(size_type i) { return i < 250000 ? 0 : 1; }
};
TEST_F(TDigestMergeTest, Grouped)
{
  auto values = generate_standardized_percentile_distribution(data_type{type_id::FLOAT64});
  CUDF_EXPECTS(values->size() == 750000, "Unexpected distribution size");
  // key column, filled below via key_groups
  auto keys = cudf::make_fixed_width_column(
    data_type{type_id::INT32}, values->size(), mask_state::UNALLOCATED);
  // 2 key groups: rows 0-250000 get key 0, rows 250000-750000 get key 1 (see key_groups above)
  auto key_iter = cudf::detail::make_counting_transform_iterator(0, key_groups{});
  thrust::copy(rmm::exec_policy(rmm::cuda_stream_default),
               key_iter,
               key_iter + keys->size(),
               keys->mutable_view().template begin<int>());

  // three equal slices of the values; grouped_split_values mirrors the 2 key groups
  auto split_values         = cudf::split(*values, {250000, 500000});
  auto grouped_split_values = cudf::split(*values, {250000});
  auto split_keys           = cudf::split(*keys, {250000, 500000});

  int const delta = 1000;

  // generate separate digests, one per slice
  std::vector<std::unique_ptr<column>> parts;
  auto iter = thrust::make_counting_iterator(0);
  std::transform(
    iter,
    iter + split_values.size(),
    std::back_inserter(parts),
    [&split_keys, &split_values, delta](int i) {
      cudf::table_view t({split_keys[i]});
      cudf::groupby::groupby gb(t);
      std::vector<cudf::groupby::aggregation_request> requests;
      std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
      aggregations.push_back(cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta));
      requests.push_back({split_values[i], std::move(aggregations)});
      auto result = gb.aggregate(requests);
      return std::move(result.second[0].results[0]);
    });
  std::vector<column_view> part_views;
  std::transform(parts.begin(),
                 parts.end(),
                 std::back_inserter(part_views),
                 [](std::unique_ptr<column> const& col) { return col->view(); });

  // merge delta = 1000
  {
    int const merge_delta = 1000;

    // merge them: parts 0 maps to merge group 0, parts 1 and 2 map to merge group 1
    auto merge_input = cudf::concatenate(part_views);
    cudf::test::fixed_width_column_wrapper<int> merge_keys{0, 1, 1};
    cudf::table_view key_table({merge_keys});
    cudf::groupby::groupby gb(key_table);
    std::vector<cudf::groupby::aggregation_request> requests;
    std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
    aggregations.push_back(
      cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(merge_delta));
    requests.push_back({*merge_input, std::move(aggregations)});
    auto result = gb.aggregate(requests);

    CUDF_EXPECTS(result.second[0].results[0]->size() == 2,
                 "Unexpected tdigest merge result size");
    cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

    // verify centroids: {flattened centroid index, expected mean, expected weight}
    std::vector<expected_value> expected{// group 0
                                         {0, 0.00013945158577498588, 2},
                                         {10, 0.04804393446447509375, 50},
                                         {66, 2.10089484962640948851, 316},
                                         {139, 8.92977366346101852912, 601},
                                         {243, 23.89152910016953867967, 784},
                                         {366, 41.62636569363655780762, 586},
                                         {432, 47.73085102980330418632, 326},
                                         {460, 49.20637897385523018556, 196},
                                         {501, 49.99998311512171511595, 1},
                                         // group 1
                                         {502 + 0, 50.00022508669655252334, 2},
                                         {502 + 15, 50.05415694538910287292, 74},
                                         {502 + 70, 51.21421484112906341579, 334},
                                         {502 + 150, 55.19367617848146778670, 635},
                                         {502 + 260, 63.24605285552920008740, 783},
                                         {502 + 380, 76.99522005804017510400, 1289},
                                         {502 + 440, 84.22673817294192133431, 758},
                                         {502 + 490, 88.11787981529532487457, 784},
                                         {502 + 555, 93.02766411136053648079, 704},
                                         {502 + 618, 96.91486035315536184953, 516},
                                         {502 + 710, 99.87755861436669135855, 110},
                                         {502 + 733, 99.99970905482754801596, 1}};
    tdigest_sample_compare(tdv, expected);

    // verify min/max of each output digest against the corresponding raw input group
    auto split_results = cudf::split(*result.second[0].results[0], {1});
    auto iter          = thrust::make_counting_iterator(0);
    std::for_each(iter, iter + split_results.size(), [&](size_type i) {
      auto copied = std::make_unique<column>(split_results[i]);
      tdigest_minmax_compare<double>(cudf::tdigest::tdigest_column_view(*copied),
                                     grouped_split_values[i]);
    });
  }

  // merge delta = 100
  {
    int const merge_delta = 100;

    // merge them
    auto merge_input = cudf::concatenate(part_views);
    cudf::test::fixed_width_column_wrapper<int> merge_keys{0, 1, 1};
    cudf::table_view key_table({merge_keys});
    cudf::groupby::groupby gb(key_table);
    std::vector<cudf::groupby::aggregation_request> requests;
    std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
    aggregations.push_back(
      cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(merge_delta));
    requests.push_back({*merge_input, std::move(aggregations)});
    auto result = gb.aggregate(requests);

    CUDF_EXPECTS(result.second[0].results[0]->size() == 2,
                 "Unexpected tdigest merge result size");
    cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

    // verify centroids (coarser digest -> fewer, heavier centroids)
    std::vector<expected_value> expected{// group 0
                                         {0, 0.02182479870203561656, 231},
                                         {3, 0.60625795002234528219, 1688},
                                         {13, 8.40462931740497687372, 5867},
                                         {27, 28.79997783486397722186, 7757},
                                         {35, 40.22391421196020644402, 6224},
                                         {45, 48.96506331299028857984, 2225},
                                         {50, 49.99979491345574444949, 4},
                                         // group 1
                                         {51 + 0, 50.02171921312970681583, 460},
                                         {51 + 5, 51.45308398121498072442, 5074},
                                         {51 + 11, 55.96880716301625113829, 10011},
                                         {51 + 22, 70.18029861315150697010, 15351},
                                         {51 + 38, 92.65943436519887654867, 10718},
                                         {51 + 47, 99.27745505225347244505, 3639}};
    tdigest_sample_compare(tdv, expected);

    // verify min/max
    auto split_results = cudf::split(*result.second[0].results[0], {1});
    auto iter          = thrust::make_counting_iterator(0);
    std::for_each(iter, iter + split_results.size(), [&](size_type i) {
      auto copied = std::make_unique<column>(split_results[i]);
      tdigest_minmax_compare<double>(cudf::tdigest::tdigest_column_view(*copied),
                                     grouped_split_values[i]);
    });
  }

  // merge delta = 10
  {
    int const merge_delta = 10;

    // merge them
    auto merge_input = cudf::concatenate(part_views);
    cudf::test::fixed_width_column_wrapper<int> merge_keys{0, 1, 1};
    cudf::table_view key_table({merge_keys});
    cudf::groupby::groupby gb(key_table);
    std::vector<cudf::groupby::aggregation_request> requests;
    std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
    aggregations.push_back(
      cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(merge_delta));
    requests.push_back({*merge_input, std::move(aggregations)});
    auto result = gb.aggregate(requests);

    CUDF_EXPECTS(result.second[0].results[0]->size() == 2,
                 "Unexpected tdigest merge result size");
    cudf::tdigest::tdigest_column_view tdv(*result.second[0].results[0]);

    // verify centroids
    std::vector<expected_value> expected{// group 0
                                         {0, 2.34644806683495144028, 23623},
                                         {1, 10.95523693698660672169, 62290},
                                         {2, 24.90731657803452847588, 77208},
                                         {3, 38.88062495289155862110, 62658},
                                         {4, 47.56288303840698006297, 24217},
                                         {5, 49.99979491345574444949, 4},
                                         // group 1
                                         {6 + 0, 52.40174463129091719793, 47410},
                                         {6 + 1, 60.97025126481504031517, 124564},
                                         {6 + 2, 74.91722742839780835311, 154387},
                                         {6 + 3, 88.87559489177009197647, 124810},
                                         {6 + 4, 97.55823307073454486726, 48817},
                                         {6 + 5, 99.99901807905750672489, 12}};
    tdigest_sample_compare(tdv, expected);

    // verify min/max
    auto split_results = cudf::split(*result.second[0].results[0], {1});
    auto iter          = thrust::make_counting_iterator(0);
    std::for_each(iter, iter + split_results.size(), [&](size_type i) {
      auto copied = std::make_unique<column>(split_results[i]);
      tdigest_minmax_compare<double>(cudf::tdigest::tdigest_column_view(*copied),
                                     grouped_split_values[i]);
    });
  }
}

TEST_F(TDigestMergeTest, Empty) { tdigest_merge_empty(tdigest_groupby_simple_merge_op{}); }

TEST_F(TDigestMergeTest, EmptyGroups)
{
  // values_b/values_d carry null masks (second initializer list); nulls must be
  // excluded from the merged digest.
  cudf::test::fixed_width_column_wrapper<double> values_b{{126, 15, 1, 99, 67, 55, 2},
                                                          {1, 0, 0, 1, 1, 1, 1}};
  cudf::test::fixed_width_column_wrapper<double> values_d{{100, 200, 300, 400, 500, 600, 700},
                                                          {1, 1, 1, 1, 1, 1, 0}};
  cudf::test::fixed_width_column_wrapper<int> keys{0, 0, 0, 0, 0, 0, 0};
  int const delta = 1000;

  // interleave empty digests (a, c, e) with real ones (b, d)
  auto a = cudf::detail::tdigest::make_empty_tdigest_column();
  auto b = cudf::type_dispatcher(
    static_cast<column_view>(values_b).type(), tdigest_gen_grouped{}, keys, values_b, delta);
  auto c = cudf::detail::tdigest::make_empty_tdigest_column();
  auto d = cudf::type_dispatcher(
    static_cast<column_view>(values_d).type(), tdigest_gen_grouped{}, keys, values_d, delta);
  auto e = cudf::detail::tdigest::make_empty_tdigest_column();

  std::vector<column_view> cols;
  cols.push_back(*a);
  cols.push_back(*b);
  cols.push_back(*c);
  cols.push_back(*d);
  cols.push_back(*e);
  auto values = cudf::concatenate(cols);

  // group 0 merges {a, b, d}; groups 1 and 2 each merge only an empty digest
  cudf::test::fixed_width_column_wrapper<int> merge_keys{0, 0, 1, 0, 2};

  cudf::table_view t({merge_keys});
  cudf::groupby::groupby gb(t);
  std::vector<cudf::groupby::aggregation_request> requests;
  std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
  aggregations.push_back(cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(delta));
  requests.push_back({*values, std::move(aggregations)});
  auto result = gb.aggregate(requests);

  using FCW = cudf::test::fixed_width_column_wrapper<double>;
  // non-null values from b and d, sorted; weights are all 1
  cudf::test::fixed_width_column_wrapper<double> expected_means{
    2, 55, 67, 99, 100, 126, 200, 300, 400, 500, 600};
  cudf::test::fixed_width_column_wrapper<double> expected_weights{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  auto expected = make_expected_tdigest_column(
    {{expected_means, expected_weights, 2, 600}, {FCW{}, FCW{}, 0, 0}, {FCW{}, FCW{}, 0, 0}});

  CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected, *result.second[0].results[0]);
}

}  // namespace test
}  // namespace cudf
the_stack
namespace caffe {

// Normalizes each input element with per-channel statistics:
//   top = (bottom - mean[c]) / sqrt(var[c] + eps) * scale[c] + bias[c]
// One thread per element; grid covers num * channels * spatial_dim.
// NOTE(review): sqrt() on a float argument promotes to double on the device;
// sqrtf() would avoid that — confirm before changing, as it alters results slightly.
static __global__ void kernel_test_forward(
    const int num, const int channels, const int spatial_dim,
    const float* scale, const float* bias, const float* mean, const float* var,
    const float eps, const float* bottom_data, float* top_data) {
  CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
    int c = (index / spatial_dim) % channels;
    top_data[index] = ((bottom_data[index] - mean[c]) / sqrt(var[c] + eps)) * scale[c] + bias[c];
  }
}

// Backward pass through the affine-normalize transform, treating mean/var as
// constants (the "test"/inference form): bottom_diff = top_diff / std * scale.
static __global__ void kernel_test_backward(
    const int num, const int channels, const int spatial_dim,
    const float* scale, const float* bias, const float* mean, const float* var,
    const float eps, const float* top_diff, float* bottom_diff) {
  CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
    int c = (index / spatial_dim) % channels;
    bottom_diff[index] = top_diff[index] / sqrt(var[c] + eps) * scale[c];
  }
}

// Computes per-channel E[x] and E[x^2] for this GPU's shard.
// Launch: one block per channel, CAFFE_CUDA_NUM_THREADS threads per block.
// norm_factor is the GLOBAL element count per channel (num * spatial * NGPUS),
// so the caller can sum partial results across GPUs to get the true mean.
static __global__ void kernel_local_stats(int num, int channels, int spatial_dim,
    const float norm_factor, const float* bottom_data, float* mean, float* var) {
  // store local E[x] to mean, E[x^2] to var temporarily
  __shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
  __shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
  const int tid = threadIdx.x;
  const int c = blockIdx.x;

  // load and accumulate data on each thread
  buffer1[tid] = buffer2[tid] = 0;
  for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
    // index into NCHW layout: sample (i / spatial_dim), channel c, offset (i % spatial_dim)
    const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
    buffer1[tid] += bottom_data[index];
    buffer2[tid] += bottom_data[index] * bottom_data[index];
  }
  __syncthreads();

  // do tree reduction over the block's partial sums
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      buffer1[tid] += buffer1[tid + s];
      buffer2[tid] += buffer2[tid + s];
    }
    __syncthreads();
  }

  // save the result back
  if (tid == 0) {
    mean[c] = buffer1[0] / norm_factor;
    var[c] = buffer2[0] / norm_factor;
  }
}

// Per-channel gradients w.r.t. scale and bias:
//   scale_diff[c] = sum(top_diff * x_hat),  bias_diff[c] = sum(top_diff)
// Same launch shape and reduction scheme as kernel_local_stats.
static __global__ void kernel_backward_scale_bias(
    const int num, const int channels, const int spatial_dim, const float* mean,
    const float* var, const float eps,
    const float* top_diff, const float* bottom_data,
    float* scale_diff, float* bias_diff) {
  __shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
  __shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
  const int tid = threadIdx.x;
  const int c = blockIdx.x;

  // load and accumulate data on each thread
  buffer1[tid] = buffer2[tid] = 0;
  for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
    const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
    buffer1[tid] += top_diff[index] * (bottom_data[index] - mean[c]) / sqrt(var[c] + eps);
    buffer2[tid] += top_diff[index];
  }
  __syncthreads();

  // do tree reduction
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      buffer1[tid] += buffer1[tid + s];
      buffer2[tid] += buffer2[tid + s];
    }
    __syncthreads();
  }

  // save the result back
  if (tid == 0) {
    scale_diff[c] = buffer1[0];
    bias_diff[c] = buffer2[0];
  }
}

// Full batch-norm bottom gradient, including the contribution of mean/var
// through x_hat (scale_diff/bias_diff here are the global per-channel sums).
static __global__ void kernel_backward_bottom(
    const int num, const int channels, const int spatial_dim,
    const float* scale, const float* bias, const float* mean, const float* var,
    const float eps, const float norm_factor, const float* top_diff,
    const float* scale_diff, const float* bias_diff,
    const float* bottom_data, float* bottom_diff) {
  CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
    int c = (index / spatial_dim) % channels;
    const float inv_std = float(1) / sqrt(var[c] + eps);
    const float x_norm = (bottom_data[index] - mean[c]) * inv_std;
    bottom_diff[index] = scale[c] * inv_std *
        (top_diff[index] - (x_norm * scale_diff[c] + bias_diff[c]) / norm_factor);
  }
}

// Multi-GPU ("cross-GPU synchronized") batch-norm forward.
// parallel_blobs_ layout (4 groups of NGPUS replicas): [0]=scale, [1]=bias,
// [2]=running mean, [3]=running var.  Caffe::number_collect_sample controls
// running-stat accumulation: -1 means EMA with factor 0.01, >=0 means running
// average over collected samples.
void ParallelBatchNormLayer::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  if (Caffe::number_collect_sample != -1) {
    CHECK_EQ(this->parallel_blobs_.size(),4*NGPUS);
    if (Caffe::number_collect_sample == 0) {
      // restart stat collection from scratch
      caffe_gpu_set(this->blobs_[2]->count(),float(0),this->blobs_[2]->mutable_gpu_data());
      caffe_gpu_set(this->blobs_[3]->count(),float(0),this->blobs_[3]->mutable_gpu_data());
    }
    // broadcast running mean/var from GPU 0 to all replicas
    // NOTE(review): ncclBcast is given a NULL stream (default stream) here,
    // unlike the disabled code below which used per-GPU parallel streams.
    for (int i = 0; i < NGPUS; i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),NULL);
    }
    for (int i = 0; i < NGPUS; i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),NULL);
    }
  }
#if 0
  for (int i = 0; i < bottom.size(); ++i) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    ncclBcast((void *)this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[0*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
  }
  for (int i = 0; i < bottom.size(); ++i) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    ncclBcast((void *)this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[1*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
  }
  for (int i = 0; i < bottom.size(); ++i) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
  }
  for (int i = 0; i < bottom.size(); ++i) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(), ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
  }
#endif

  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();
  // m = global per-channel element count across all GPUs
  const int m = num * height * width * NGPUS;

  //----------------------------------------------------
  // compute local E[x] and E[x^2]
  if (Caffe::bn_state() == "learned") {
    if (Caffe::number_collect_sample == 0) {
      for(int i=0;i<NGPUS;i++) {
        CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
        caffe_gpu_set(this->parallel_blobs_[2*NGPUS+i]->count(),float(0),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
        caffe_gpu_set(this->parallel_blobs_[3*NGPUS+i]->count(),float(0),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
      }
    }
    // per-GPU partial sums, already divided by the global count m
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      kernel_local_stats<<<channels, CAFFE_CUDA_NUM_THREADS>>>(
          num, channels, height * width, float(m),
          bottom[i]->gpu_data(),
          parallel_mean_buffer_[i]->mutable_gpu_data(),
          parallel_var_buffer_[i]->mutable_gpu_data());
    }

    // sync E[x] and E[x^2] across GPUs
    REDUCE_DATA(parallel_mean_buffer_);
    REDUCE_DATA(parallel_var_buffer_);

    // var = E[x^2] - E[x]^2
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      caffe_gpu_mul(channels, parallel_mean_buffer_[i]->gpu_data(),
          parallel_mean_buffer_[i]->gpu_data(),
          top[i]->mutable_gpu_data());  // reuse the top buffer as scratch
      caffe_gpu_sub(channels, parallel_var_buffer_[i]->gpu_data(),
          top[i]->gpu_data(),
          parallel_var_buffer_[i]->mutable_gpu_data());
    }

    // blend batch stats into the running mean/var
    float factor;
    if (Caffe::number_collect_sample == -1) factor = 0.01;  // EMA mode
    else factor = float(1)/float(Caffe::number_collect_sample+1);  // running average
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      caffe_gpu_axpby(parallel_mean_buffer_[i]->count(), factor,
          parallel_mean_buffer_[i]->gpu_data(), 1-factor,
          this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
      caffe_gpu_axpby(parallel_var_buffer_[i]->count(), factor,
          parallel_var_buffer_[i]->gpu_data(), 1-factor,
          this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
    }

    // normalize with the freshly computed batch statistics
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      kernel_test_forward<<<CAFFE_GET_BLOCKS(bottom[i]->count()),CAFFE_CUDA_NUM_THREADS>>> (
          num, channels, height * width,
          this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
          this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
          parallel_mean_buffer_[i]->gpu_data(),
          parallel_var_buffer_[i]->gpu_data(),
          float(BN_EPS),
          bottom[i]->gpu_data(), top[i]->mutable_gpu_data());
    }
  } else {
    // inference: normalize with the stored running statistics
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      kernel_test_forward<<<CAFFE_GET_BLOCKS(bottom[i]->count()), CAFFE_CUDA_NUM_THREADS>>> (
          num, channels, height * width,
          this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
          this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
          this->parallel_blobs_[2*NGPUS+i]->gpu_data(),
          this->parallel_blobs_[3*NGPUS+i]->gpu_data(),
          float(BN_EPS),
          bottom[i]->gpu_data(), top[i]->mutable_gpu_data());
    }
  }
  //----------------------------------------------------

  if (Caffe::number_collect_sample != -1) {
    // reduce running mean/var to GPU 0 and average over replicas
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      ncclReduce( this->parallel_blobs_[2*NGPUS+i]->gpu_data(),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(), this->parallel_blobs_[2*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
    }
    for(int i=0;i<NGPUS;i++) {
      CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
      ncclReduce( this->parallel_blobs_[3*NGPUS+i]->gpu_data(),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(), this->parallel_blobs_[3*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
    }
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[0]));
    caffe_gpu_scal(this->blobs_[2]->count(),float(1)/float(NGPUS),this->blobs_[2]->mutable_gpu_data());
    caffe_gpu_scal(this->blobs_[3]->count(),float(1)/float(NGPUS),this->blobs_[3]->mutable_gpu_data());
  }
}

// Multi-GPU batch-norm backward: accumulates scale/bias gradients across GPUs
// (averaged over NGPUS) and computes the bottom gradient with the full
// batch-norm chain rule.  Assumes parallel_mean_buffer_/parallel_var_buffer_
// still hold the batch statistics from the forward pass.
void ParallelBatchNormLayer::Backward_gpu(const vector<Blob*>& top,
    const vector<Blob*>& bottom) {
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int height = bottom[0]->height();
  int width = bottom[0]->width();

  // compute local scale and bias diff
  for(int i=0;i<NGPUS;i++) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    kernel_backward_scale_bias<<<channels, CAFFE_CUDA_NUM_THREADS>>>(
        num, channels, height * width,
        parallel_mean_buffer_[i]->gpu_data(),
        parallel_var_buffer_[i]->gpu_data(),
        float(BN_EPS),
        top[i]->gpu_diff(),
        bottom[i]->gpu_data(),
        parallel_mean_buffer_[i]->mutable_gpu_diff(), // temp use for local scale diff
        parallel_var_buffer_[i]->mutable_gpu_diff() // temp use for local bias diff
    );
  }

  // sync scale and bias diff
  REDUCE_DIFF(parallel_mean_buffer_) REDUCE_DIFF(parallel_var_buffer_);

  // add to param blobs diff (averaged over GPUs)
  for(int i=0;i<NGPUS;i++) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    caffe_gpu_axpy(channels, float(1) / float(NGPUS), parallel_mean_buffer_[i]->gpu_diff(), this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_diff());
    caffe_gpu_axpy(channels, float(1) / float(NGPUS), parallel_var_buffer_[i]->gpu_diff(), this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_diff());
  }

  // compute bottom diff
  for(int i=0;i<NGPUS;i++) {
    CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
    kernel_backward_bottom<<<CAFFE_GET_BLOCKS(bottom[i]->count()), CAFFE_CUDA_NUM_THREADS>>>(
        num, channels, height * width,
        this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
        this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
        parallel_mean_buffer_[i]->gpu_data(),
        parallel_var_buffer_[i]->gpu_data(),
        float(BN_EPS), float(num * height * width * NGPUS),
        top[i]->gpu_diff(),
        parallel_mean_buffer_[i]->gpu_diff(),
        parallel_var_buffer_[i]->gpu_diff(),
        bottom[i]->gpu_data(),
        bottom[i]->mutable_gpu_diff());
  }
}

// Second-order forward pass: intentionally a no-op for this layer.
void ParallelBatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
}

}  // namespace caffe
the_stack
#include <assert.h> #include <cuda_runtime.h> #include "OpenGlCudaHelper.h" // CUDA facts: // // On devices of compute capability 2.x and beyond, 32-bit integer multiplication is natively supported, // but 24-bit integer multiplication is not. __[u]mul24 is therefore implemented using multiple instructions // and should not be used. // // Integer division and modulo operation are costly: below 20 instructions on devices of compute capability 2.x and // higher. They can be replaced with bitwise operations in some cases: If n is a power of 2, (i/n) is equivalent to // (i>>log2(n)) and (i%n) is equivalent to (i&(n-1)); the compiler will perform these conversions if n is literal. namespace mf { /// CUDA kernel for simple byte-per-cell world evaluation. /// /// @param lifeData Linearized 2D array of life data with byte-per-cell density. /// @param worldWidth Width of life world in cells (bytes). /// @param worldHeight Height of life world in cells (bytes). /// @param resultLifeData Result buffer in the same format as input. __global__ void simpleLifeKernel(const ubyte* lifeData, uint worldWidth, uint worldHeight, ubyte* resultLifeData) { uint worldSize = worldWidth * worldHeight; for (uint cellId = blockIdx.x * blockDim.x + threadIdx.x; cellId < worldSize; cellId += blockDim.x * gridDim.x) { uint x = cellId % worldWidth; uint yAbs = cellId - x; uint xLeft = (x + worldWidth - 1) % worldWidth; uint xRight = (x + 1) % worldWidth; uint yAbsUp = (yAbs + worldSize - worldWidth) % worldSize; uint yAbsDown = (yAbs + worldWidth) % worldSize; // Count alive cells. uint aliveCells = lifeData[xLeft + yAbsUp] + lifeData[x + yAbsUp] + lifeData[xRight + yAbsUp] + lifeData[xLeft + yAbs] + lifeData[xRight + yAbs] + lifeData[xLeft + yAbsDown] + lifeData[x + yAbsDown] + lifeData[xRight + yAbsDown]; resultLifeData[x + yAbs] = aliveCells == 3 || (aliveCells == 2 && lifeData[x + yAbs]) ? 1 : 0; } } /// Runs a kernel for simple byte-per-cell world evaluation. 
/// Runs the simple byte-per-cell kernel for the given number of iterations,
/// ping-ponging between the two buffers (pointers are swapped by reference).
/// Returns false without launching anything when the world size is not an
/// exact multiple of threadsCount (the kernel has no tail handling).
bool runSimpleLifeKernel(ubyte*& d_lifeData, ubyte*& d_lifeDataBuffer, size_t worldWidth,
    size_t worldHeight, size_t iterationsCount, ushort threadsCount) {
  if ((worldWidth * worldHeight) % threadsCount != 0) {
    return false;
  }

  size_t reqBlocksCount = (worldWidth * worldHeight) / threadsCount;
  // Cap the grid; the kernel's grid-stride loop covers the remainder.
  ushort blocksCount = (ushort)std::min((size_t)32768, reqBlocksCount);

  for (size_t i = 0; i < iterationsCount; ++i) {
    simpleLifeKernel<<<blocksCount, threadsCount>>>(d_lifeData, uint(worldWidth),
      uint(worldHeight), d_lifeDataBuffer);
    std::swap(d_lifeData, d_lifeDataBuffer);
  }

  checkCudaErrors(cudaDeviceSynchronize());
  return true;
}

/// CUDA kernel for rendering of life world on the screen.
/// This kernel transforms bit-per-cell life world to ARGB screen buffer.
/// One thread per destination pixel; no bounds check, so the launch must cover
/// exactly the destination buffer.
/// NOTE(review): parameter "detHeight" is a typo for "destHeight" and is never
/// read inside the kernel (only destWidth is used for addressing).
__global__ void displayLifeKernel(const ubyte* lifeData, uint worldWidth, uint worldHeight,
    uchar4* destination, int destWidth, int detHeight, int2 displacement, double zoomFactor,
    int multisample, bool simulateColors, bool cyclic, bool bitLife) {

  uint pixelId = blockIdx.x * blockDim.x + threadIdx.x;

  // Map screen pixel to world coordinates (pan + zoom).
  int x = (int)floor(((int)(pixelId % destWidth) - displacement.x) * zoomFactor);
  int y = (int)floor(((int)(pixelId / destWidth) - displacement.y) * zoomFactor);

  if (cyclic) {
    // Wrap into [0, worldWidth/Height); double-modulo handles negatives.
    x = ((x % (int)worldWidth) + worldWidth) % worldWidth;
    y = ((y % (int)worldHeight) + worldHeight) % worldHeight;
  } else if (x < 0 || y < 0 || x >= worldWidth || y >= worldHeight) {
    // Outside a non-cyclic world: paint gray and bail out.
    destination[pixelId].x = 127;
    destination[pixelId].y = 127;
    destination[pixelId].z = 127;
    return;
  }

  int value = 0;  // accumulated alive-ness over the multisample window
  // Each sampled alive cell contributes this much toward 255.
  int increment = 255 / (multisample * multisample);

  if (bitLife) {
    // Bit-per-cell layout: extract the bit for each sampled cell.
    for (int dy = 0; dy < multisample; ++dy) {
      int yAbs = (y + dy) * worldWidth;
      for (int dx = 0; dx < multisample; ++dx) {
        int xBucket = yAbs + x + dx;
        value += ((lifeData[xBucket >> 3] >> (7 - (xBucket & 0x7))) & 0x1) * increment;
      }
    }
  } else {
    // Byte-per-cell layout.
    for (int dy = 0; dy < multisample; ++dy) {
      int yAbs = (y + dy) * worldWidth;
      for (int dx = 0; dx < multisample; ++dx) {
        value += lifeData[yAbs + (x + dx)] * increment;
      }
    }
  }

  // In cyclic mode the x==0 / y==0 seam keeps its red channel untouched so the
  // world boundary stays visible.
  bool isNotOnBoundary = !cyclic || !(x == 0 || y == 0);

  if (simulateColors) {
    // Alpha channel (set at the end of the previous frame) holds the cell's
    // previous state; color-code the four alive/dead transitions.
    if (value > 0) {
      if (destination[pixelId].w > 0) {
        // Stayed alive - get darker.
        if (destination[pixelId].y > 63) {
          if (isNotOnBoundary) {
            --destination[pixelId].x;
          }
          --destination[pixelId].y;
          --destination[pixelId].z;
        }
      } else {
        // Born - full white color.
        destination[pixelId].x = 255;
        destination[pixelId].y = 255;
        destination[pixelId].z = 255;
      }
    } else {
      if (destination[pixelId].w > 0) {
        // Died - dark green.
        if (isNotOnBoundary) {
          destination[pixelId].x = 0;
        }
        destination[pixelId].y = 128;
        destination[pixelId].z = 0;
      } else {
        // Stayed dead - get darker.
        if (destination[pixelId].y > 8) {
          // NOTE(review): this guard body is empty — possibly a lost
          // red-channel update mirroring the branches above; confirm intent
          // before changing.
          if (isNotOnBoundary) {
          }
          destination[pixelId].y -= 8;
        }
      }
    }
  } else {
    // Plain grayscale; boundary seam forced to red (255 in x) when cyclic.
    destination[pixelId].x = isNotOnBoundary ? value : 255;
    destination[pixelId].y = value;
    destination[pixelId].z = value;
  }

  // Save last state of the cell to the alpha channel that is not used in rendering.
  destination[pixelId].w = value;
}

/// Runs a kernel for rendering of life world on the screen.
void runDisplayLifeKernel(const ubyte* d_lifeData, size_t worldWidth, size_t worldHeight, uchar4* destination, int destWidth, int destHeight, int displacementX, int displacementY, int zoom, bool simulateColors, bool cyclic, bool bitLife) { ushort threadsCount = 256; assert((worldWidth * worldHeight) % threadsCount == 0); size_t reqBlocksCount = (destWidth * destHeight) / threadsCount; assert(reqBlocksCount < 65536); ushort blocksCount = (ushort)reqBlocksCount; int multisample = std::min(4, (int)std::pow(2, std::max(0, zoom))); displayLifeKernel<<<blocksCount, threadsCount>>>(d_lifeData, uint(worldWidth), uint(worldHeight), destination, destWidth, destHeight, make_int2(displacementX, displacementY), std::pow(2, zoom), multisample, zoom > 1 ? false : simulateColors, cyclic, bitLife); checkCudaErrors(cudaDeviceSynchronize()); } /// CUDA kernel that encodes byte-per-cell data to bit-per-cell data. /// Needs to be invoked for each byte in encoded data (cells / 8). __global__ void bitLifeEncodeKernel(const ubyte* lifeData, size_t encWorldSize, ubyte* resultEncodedLifeData) { for (size_t outputBucketId = blockIdx.x * blockDim.x + threadIdx.x; outputBucketId < encWorldSize; outputBucketId += blockDim.x * gridDim.x) { size_t cellId = outputBucketId << 3; ubyte result = lifeData[cellId] << 7 | lifeData[cellId + 1] << 6 | lifeData[cellId + 2] << 5 | lifeData[cellId + 3] << 4 | lifeData[cellId + 4] << 3 | lifeData[cellId + 5] << 2 | lifeData[cellId + 6] << 1 | lifeData[cellId + 7]; resultEncodedLifeData[outputBucketId] = result; } } /// Runs a kernel that encodes byte-per-cell data to bit-per-cell data. 
void runBitLifeEncodeKernel(const ubyte* d_lifeData, uint worldWidth, uint worldHeight, ubyte* d_encodedLife) { assert(worldWidth % 8 == 0); size_t worldEncDataWidth = worldWidth / 8; size_t encWorldSize = worldEncDataWidth * worldHeight; ushort threadsCount = 256; assert(encWorldSize % threadsCount == 0); size_t reqBlocksCount = encWorldSize / threadsCount; ushort blocksCount = (ushort)std::min((size_t)32768, reqBlocksCount); bitLifeEncodeKernel<<<blocksCount, threadsCount>>>(d_lifeData, encWorldSize, d_encodedLife); checkCudaErrors(cudaDeviceSynchronize()); } /// CUDA kernel that decodes data from bit-per-cell to byte-per-cell format. /// Needs to be invoked for each byte in encoded data (cells / 8). __global__ void bitLifeDecodeKernel(const ubyte* encodedLifeData, uint encWorldSize, ubyte* resultDecodedlifeData) { for (uint outputBucketId = blockIdx.x * blockDim.x + threadIdx.x; outputBucketId < encWorldSize; outputBucketId += blockDim.x * gridDim.x) { uint cellId = outputBucketId << 3; ubyte dataBucket = encodedLifeData[outputBucketId]; resultDecodedlifeData[cellId] = dataBucket >> 7; resultDecodedlifeData[cellId + 1] = (dataBucket >> 6) & 0x01; resultDecodedlifeData[cellId + 2] = (dataBucket >> 5) & 0x01; resultDecodedlifeData[cellId + 3] = (dataBucket >> 4) & 0x01; resultDecodedlifeData[cellId + 4] = (dataBucket >> 3) & 0x01; resultDecodedlifeData[cellId + 5] = (dataBucket >> 2) & 0x01; resultDecodedlifeData[cellId + 6] = (dataBucket >> 1) & 0x01; resultDecodedlifeData[cellId + 7] = dataBucket & 0x01; } } /// Runs a kernel that decodes data from bit-per-cell to byte-per-cell format. 
void runBitLifeDecodeKernel(const ubyte* d_encodedLife, uint worldWidth, uint worldHeight, ubyte* d_lifeData) { assert(worldWidth % 8 == 0); uint worldEncDataWidth = worldWidth / 8; uint encWorldSize = worldEncDataWidth * worldHeight; ushort threadsCount = 256; assert(encWorldSize % threadsCount == 0); uint reqBlocksCount = encWorldSize / threadsCount; ushort blocksCount = ushort(std::min(32768u, reqBlocksCount)); // decode life data back to byte per cell format bitLifeDecodeKernel<<<blocksCount, threadsCount>>>(d_encodedLife, encWorldSize, d_lifeData); checkCudaErrors(cudaDeviceSynchronize()); } /// CUDA device function that evaluates state of lookup table based on coordinates and key (state). __device__ inline uint getCellState(uint x, uint y, uint key) { uint index = y * 6 + x; return (key >> ((3 * 6 - 1) - index)) & 0x1; } /// CUDA kernel that computes the 6x3 lookup table. /// Needs to be invoked for each entry in lookup table (table size is 2^(6 * 3)). __global__ void precompute6x3EvaluationTableKernel(ubyte* resultEvalTableData) { uint tableIndex = blockIdx.x * blockDim.x + threadIdx.x; ubyte resultState = 0; // For each cell. for (uint dx = 0; dx < 4; ++dx) { // Count alive neighbors. uint aliveCount = 0; for (uint x = 0; x < 3; ++x) { for (uint y = 0; y < 3; ++y) { aliveCount += getCellState(x + dx, y, tableIndex); } } uint centerState = getCellState(1 + dx, 1, tableIndex); aliveCount -= centerState; // Do not count center cell in the sum. if (aliveCount == 3 || (aliveCount == 2 && centerState == 1)) { resultState |= 1 << (3 - dx); } } resultEvalTableData[tableIndex] = resultState; } /// Runs a kernel that computes the 6x3 lookup table. 
void runPrecompute6x3EvaluationTableKernel(ubyte* d_lookupTable) {
	// One table entry per possible 6x3 neighborhood bit pattern: 2^18 entries.
	size_t lookupTableSize = 1 << (6 * 3);
	ushort threadsCount = 256;
	assert(lookupTableSize % threadsCount == 0);
	size_t reqBlocksCount = lookupTableSize / threadsCount;
	assert(reqBlocksCount < 65536);  // block count must fit the ushort launch config
	ushort blocksCount = (ushort)reqBlocksCount;

	precompute6x3EvaluationTableKernel<<<blocksCount, threadsCount>>>(d_lookupTable);
	checkCudaErrors(cudaDeviceSynchronize());
}

/// CUDA kernel that evaluates bit-per-cell life world using lookup table.
/// Needs to be called (worldDataWidth * worldHeight) / bytesPerThread times.
/// Note that worldDataWidth % bytesPerThread must be 0 (threads can not jump between rows).
///
/// @param lifeData        Linearized 2D array of life data with bit-per-cell density.
/// @param worldDataWidth  Width of life data in bytes (width / 8).
/// @param worldHeight     Height of life data (same as worldDataHeight would be).
/// @param bytesPerThread  Number of bytes of life data processed per thread.
/// @param evalTableData   Evaluation lookup table 6 x 3 (for 4 bits of data).
/// @param resultLifeData  Result buffer in the same format as input.
__global__ void bitLifeKernelLookup(const ubyte* lifeData, uint worldDataWidth,
		uint worldHeight, uint bytesPerThread, const ubyte* evalTableData,
		ubyte* resultLifeData) {

	uint worldSize = (worldDataWidth * worldHeight);

	for (uint cellId = (blockIdx.x * blockDim.x + threadIdx.x) * bytesPerThread;
			cellId < worldSize;
			cellId += blockDim.x * gridDim.x * bytesPerThread) {

		// Toroidal (wrap-around) addressing for the column and the rows above/below.
		uint x = (cellId + worldDataWidth - 1) % worldDataWidth;  // Start at block x - 1.
		uint yAbs = (cellId / worldDataWidth) * worldDataWidth;
		uint yAbsUp = (yAbs + worldSize - worldDataWidth) % worldSize;
		uint yAbsDown = (yAbs + worldDataWidth) % worldSize;

		// Initialize data with previous byte and current byte.
		uint data0 = (uint)lifeData[x + yAbsUp] << 8;
		uint data1 = (uint)lifeData[x + yAbs] << 8;
		uint data2 = (uint)lifeData[x + yAbsDown] << 8;

		x = (x + 1) % worldDataWidth;
		data0 |= (uint)lifeData[x + yAbsUp];
		data1 |= (uint)lifeData[x + yAbs];
		data2 |= (uint)lifeData[x + yAbsDown];

		for (uint i = 0; i < bytesPerThread; ++i) {
			uint oldX = x;  // Old x is referring to current center cell.
			x = (x + 1) % worldDataWidth;
			// Shift in the next byte so each row register holds prev|center|next bytes.
			data0 = (data0 << 8) | (uint)lifeData[x + yAbsUp];
			data1 = (data1 << 8) | (uint)lifeData[x + yAbs];
			data2 = (data2 << 8) | (uint)lifeData[x + yAbsDown];

			// Assemble two 18-bit (6x3) window keys: one for the high nibble of
			// the output byte, one for the low nibble.
			uint lifeStateHi = ((data0 & 0x1F800) << 1) | ((data1 & 0x1F800) >> 5)
				| ((data2 & 0x1F800) >> 11);
			uint lifeStateLo = ((data0 & 0x1F80) << 5) | ((data1 & 0x1F80) >> 1)
				| ((data2 & 0x1F80) >> 7);

			// Each table lookup yields 4 result cells; combine into one byte.
			resultLifeData[oldX + yAbs] = (evalTableData[lifeStateHi] << 4)
				| evalTableData[lifeStateLo];
		}
	}
}

/// CUDA kernel that evaluates bit-per-cell life world using alive cells counting.
/// Parameters are the same as @see bitLifeKernelLookup.
__global__ void bitLifeKernelCounting(const ubyte* lifeData, uint worldDataWidth,
		uint worldHeight, uint bytesPerThread, ubyte* resultLifeData) {

	uint worldSize = (worldDataWidth * worldHeight);

	for (uint cellId = (blockIdx.x * blockDim.x + threadIdx.x) * bytesPerThread;
			cellId < worldSize;
			cellId += blockDim.x * gridDim.x * bytesPerThread) {

		uint x = (cellId + worldDataWidth - 1) % worldDataWidth;  // Start at block x - 1.
		uint yAbs = (cellId / worldDataWidth) * worldDataWidth;
		uint yAbsUp = (yAbs + worldSize - worldDataWidth) % worldSize;
		uint yAbsDown = (yAbs + worldDataWidth) % worldSize;

		// Initialize data with previous byte and current byte.
		// Bytes occupy bits 23..16 (prev) and 15..8 (center); bits 7..0 get the
		// next byte inside the loop.
		uint data0 = (uint)lifeData[x + yAbsUp] << 16;
		uint data1 = (uint)lifeData[x + yAbs] << 16;
		uint data2 = (uint)lifeData[x + yAbsDown] << 16;

		x = (x + 1) % worldDataWidth;
		data0 |= (uint)lifeData[x + yAbsUp] << 8;
		data1 |= (uint)lifeData[x + yAbs] << 8;
		data2 |= (uint)lifeData[x + yAbsDown] << 8;

		for (uint i = 0; i < bytesPerThread; ++i) {
			uint oldX = x;  // Old x is referring to current center cell.
			x = (x + 1) % worldDataWidth;
			data0 |= (uint)lifeData[x + yAbsUp];
			data1 |= (uint)lifeData[x + yAbs];
			data2 |= (uint)lifeData[x + yAbsDown];

			uint result = 0;
			// Evaluate the 8 cells of the center byte, one bit per iteration; the
			// window slides left by shifting the row registers.
			for (uint j = 0; j < 8; ++j) {
				// 23 ops.
				//uint aliveCells = ((data0 >> 14) & 0x1u) + ((data0 >> 15) & 0x1u) + ((data0 >> 16) & 0x1u)
				//	+ ((data1 >> 14) & 0x1) + ((data1 >> 16) & 0x1)  // Do not count middle cell.
				//	+ ((data2 >> 14) & 0x1u) + ((data2 >> 15) & 0x1u) + ((data2 >> 16) & 0x1u);

				// 10 ops + modulo.
				//unsigned long long state = unsigned long long(((data0 & 0x1C000) >> 8)
				//	| ((data1 & 0x14000) >> 11) | ((data2 & 0x1C000) >> 14));
				//assert(sizeof(state) == 8);
				//uint aliveCells = uint((state * 0x200040008001ULL & 0x111111111111111ULL) % 0xf);

				// 15 ops: sum bits 14 and 16 of all three rows in parallel, then add
				// the vertical neighbors at bit 15 (center cell at data1 bit 15 excluded).
				uint aliveCells = (data0 & 0x14000) + (data1 & 0x14000) + (data2 & 0x14000);
				aliveCells >>= 14;
				aliveCells = (aliveCells & 0x3) + (aliveCells >> 2)
					+ ((data0 >> 15) & 0x1u) + ((data2 >> 15) & 0x1u);

				result = result << 1 | (aliveCells == 3 || (aliveCells == 2 && (data1 & 0x8000u))
					? 1u : 0u);

				data0 <<= 1;
				data1 <<= 1;
				data2 <<= 1;
			}

			resultLifeData[oldX + yAbs] = result;
		}
	}
}

/// CUDA device function that swaps endianess of a 32 bits word.
__device__ inline uint swapEndianessUint32(uint val) {
	val = ((val << 8) & 0xFF00FF00u) | ((val >> 8) & 0xFF00FFu);
	return (val << 16) | ((val >> 16) & 0xFFFFu);
}

/// CUDA kernel that evaluates bit-per-cell life world using alive cells counting in longer words.
/// Parameters are the same as @see bitLifeKernelLookup.
__global__ void bitLifeKernelCountingBigChunks(const uint* lifeData, uint worldDataWidth,
		uint worldHeight, uint chunksPerThread, uint* resultLifeData) {

	// Same algorithm as bitLifeKernelCounting, but processes 32-bit chunks
	// instead of single bytes. Widths here are measured in uint chunks.
	uint worldSize = (worldDataWidth * worldHeight);

	for (uint cellId = (blockIdx.x * blockDim.x + threadIdx.x) * chunksPerThread;
			cellId < worldSize;
			cellId += blockDim.x * gridDim.x * chunksPerThread) {

		uint x = (cellId + worldDataWidth - 1) % worldDataWidth;  // Start at block x - 1.
		uint yAbs = (cellId / worldDataWidth) * worldDataWidth;
		uint yAbsUp = (yAbs + worldSize - worldDataWidth) % worldSize;
		uint yAbsDown = (yAbs + worldDataWidth) % worldSize;

		// All read data are in little endian form. Swap is needed to allow simple counting.
		uint currData0 = swapEndianessUint32(lifeData[x + yAbsUp]);
		uint currData1 = swapEndianessUint32(lifeData[x + yAbs]);
		uint currData2 = swapEndianessUint32(lifeData[x + yAbsDown]);

		x = (x + 1) % worldDataWidth;
		uint nextData0 = swapEndianessUint32(lifeData[x + yAbsUp]);
		uint nextData1 = swapEndianessUint32(lifeData[x + yAbs]);
		uint nextData2 = swapEndianessUint32(lifeData[x + yAbsDown]);

		for (uint i = 0; i < chunksPerThread; ++i) {
			// Evaluate front overlapping cell (bit 31 of 'next', neighbors split
			// between 'curr' bit 0 and 'next' bit 30).
			uint aliveCells = (currData0 & 0x1u) + (currData1 & 0x1u) + (currData2 & 0x1u)
				+ (nextData0 >> 31) + (nextData2 >> 31)  // Do not count middle cell.
				+ ((nextData0 >> 30) & 0x1u) + ((nextData1 >> 30) & 0x1u) + ((nextData2 >> 30) & 0x1u);

			// 31-st bit.
			uint result = (aliveCells == 3 || (aliveCells == 2 && (nextData1 >> 31)))
				? (1u << 31) : 0u;

			uint oldX = x;  // Old x is referring to current center cell.
			x = (x + 1) % worldDataWidth;
			currData0 = nextData0;
			currData1 = nextData1;
			currData2 = nextData2;
			nextData0 = swapEndianessUint32(lifeData[x + yAbsUp]);
			nextData1 = swapEndianessUint32(lifeData[x + yAbs]);
			nextData2 = swapEndianessUint32(lifeData[x + yAbsDown]);

			// Evaluate back overlapping cell (bit 0 of 'curr', neighbors split
			// between 'curr' bit 1 and the freshly loaded 'next' bit 31).
			aliveCells = ((currData0 >> 1) & 0x1u) + ((currData1 >> 1) & 0x1u) + ((currData2 >> 1) & 0x1u)
				+ (currData0 & 0x1u) + (currData2 & 0x1u)  // Do not count middle cell.
				+ (nextData0 >> 31) + (nextData1 >> 31) + (nextData2 >> 31);

			// 0-th bit.
			result |= (aliveCells == 3 || (aliveCells == 2 && (currData1 & 0x1u))) ? 1u : 0u;

			// The middle cells with no overlap.
			for (uint j = 0; j < 30; ++j) {
				uint shiftedData = currData0 >> j;
				uint aliveCells = (shiftedData & 0x1u) + ((shiftedData >> 1) & 0x1u)
					+ ((shiftedData >> 2) & 0x1u);

				shiftedData = currData2 >> j;
				aliveCells += (shiftedData & 0x1u) + ((shiftedData >> 1) & 0x1u)
					+ ((shiftedData >> 2) & 0x1u);

				shiftedData = currData1 >> j;
				aliveCells += (shiftedData & 0x1u) + ((shiftedData >> 2) & 0x1u);  // Do not count middle cell.

				result |= (aliveCells == 3 || (aliveCells == 2 && (shiftedData & 0x2))
					? (2u << j) : 0u);
			}

			// Final swap from big to little endian form on the result.
			resultLifeData[oldX + yAbs] = swapEndianessUint32(result);
		}
	}
}

/// Runs a kernel that evaluates given world of bit-per-cell density using algorithm specified by parameters.
/// Swaps the two buffers after every iteration, so on return d_encodedLifeData
/// points at the latest generation. Returns false (without launching) when the
/// sizes violate the kernels' divisibility preconditions.
bool runBitLifeKernel(ubyte*& d_encodedLifeData, ubyte*& d_encodedlifeDataBuffer,
		const ubyte* d_lookupTable, size_t worldWidth, size_t worldHeight, size_t iterationsCount,
		ushort threadsCount, uint bytesPerThread, bool useBigChunks) {

	// World has to fit into 8 bits of every byte exactly.
	if (worldWidth % 8 != 0) {
		return false;
	}

	size_t worldEncDataWidth = worldWidth / 8;
	if (d_lookupTable == nullptr && useBigChunks) {
		// Big-chunk path works on uints; re-measure the width in 4-byte chunks.
		size_t factor = sizeof(uint) / sizeof(ubyte);
		if (factor != 4) {
			return false;
		}
		if (worldEncDataWidth % factor != 0) {
			return false;
		}
		worldEncDataWidth /= factor;
	}

	if (worldEncDataWidth % bytesPerThread != 0) {
		return false;
	}

	size_t encWorldSize = worldEncDataWidth * worldHeight;
	if (encWorldSize > std::numeric_limits<uint>::max()) {
		// TODO: fix kernels to work with world bit sizes.
		return false;
	}

	if ((encWorldSize / bytesPerThread) % threadsCount != 0) {
		return false;
	}

	size_t reqBlocksCount = (encWorldSize / bytesPerThread) / threadsCount;
	ushort blocksCount = ushort(std::min(size_t(32768), reqBlocksCount));

	if (d_lookupTable == nullptr) {
		if (useBigChunks) {
			// Does this really work?! Apparently yes.
			// NOTE(review): reinterpreting ubyte*& as uint*& relies on identical
			// pointer representation; works here but is technically type-punning.
			uint*& data = (uint*&)d_encodedLifeData;
			uint*& result = (uint*&)d_encodedlifeDataBuffer;

			for (size_t i = 0; i < iterationsCount; ++i) {
				bitLifeKernelCountingBigChunks<<<blocksCount, threadsCount>>>(data,
					uint(worldEncDataWidth), uint(worldHeight), bytesPerThread, result);
				std::swap(data, result);
			}
		}
		else {
			for (size_t i = 0; i < iterationsCount; ++i) {
				bitLifeKernelCounting<<<blocksCount, threadsCount>>>(d_encodedLifeData,
					uint(worldEncDataWidth), uint(worldHeight), bytesPerThread, d_encodedlifeDataBuffer);
				std::swap(d_encodedLifeData, d_encodedlifeDataBuffer);
			}
		}
	}
	else {
		for (size_t i = 0; i < iterationsCount; ++i) {
			bitLifeKernelLookup<<<blocksCount, threadsCount>>>(d_encodedLifeData,
				uint(worldEncDataWidth), uint(worldHeight), bytesPerThread, d_lookupTable,
				d_encodedlifeDataBuffer);
			std::swap(d_encodedLifeData, d_encodedlifeDataBuffer);
		}
	}

	checkCudaErrors(cudaDeviceSynchronize());
	return true;
}

// NOTE(review): closes a scope (presumably a namespace) opened before this excerpt.
}
the_stack
* COMPILATION TIP
 * nvcc cg_eg3.cu -o cg_eg3
 *
 * */
#include <cooperative_groups.h>
#include <iostream>
#include <algorithm>  // std::fill_n
#include <memory>     // std::unique_ptr

/* ********** functions to setup device GPU, test values ********** */

/** @fn getMaxGridSize
 * 	@brief get maxGridSize (total number threads on a (thread) grid, on device GPU, of a single device GPU
 * 	@details Queries device 0 only. On a machine with no CUDA device it returns
 * 	EXIT_FAILURE (i.e. 1) instead of a grid size — callers get a tiny but
 * 	harmless limit rather than an error.
 * */
size_t get_maxGridSize() {
	cudaDeviceProp prop;
	int count;
	cudaGetDeviceCount(&count);
	size_t MAXGRIDSIZE;
	if (count > 0) {
		cudaGetDeviceProperties(&prop, 0);
		MAXGRIDSIZE = prop.maxGridSize[0];
		return MAXGRIDSIZE;
	} else {
		return EXIT_FAILURE;
	}
};

/** @brief fills input[idx] with (idx + inc) via a grid-stride loop. */
__global__ void inc_kernel(int *input, int inc, int L) {
	unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;
	for (unsigned int idx = k_x; idx < L; idx += blockDim.x*gridDim.x) {
		input[idx] = ((int) idx + inc);
	}
}

/* ********** END of functions to setup device GPU, test values ********** */

namespace cg = cooperative_groups;

/** @fn reduce_sum
 * 	@brief used to reduce (summation) on a single thread block in shared memory
 * 	@details while not obvious from this function definition, in practical usage,
 * 	val will be the partial sum that is at the index given by the global thread index
 * 	threadIdx.x + blockDim.x * blockIdx.x;
 * 	and so we'll have loaded all the various array values for this particular thread block into
 * 	shared memory lane
 * */
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
	int lane = g.thread_rank();

	// Each iteration halves the number of active threads
	// Each thread adds to partial sum[i] its sum[lane+i]
	for (int i = g.size() / 2; i > 0; i /= 2) {
		// load the array values with this thread block into temp
		temp[lane] = val;
		g.sync();	// wait for all threads to store
		if (lane < i) {
			val += temp[lane+i];
		}
		g.sync();	// wait for all threads to load
	}
	return val;  // note: only thread 0 will return full sum
};

/** @fn thread_sum
 * 	@brief compute many partial sums in parallel, Generalized to when n is not a power of 2
 * 	@details compute many partial sums in parallel, Generalized to when n is not a power of,
 * 	where each thread strides through the array computing a partial sum
 * */
__device__ int thread_sum(int *input, int L) {
	int sum = 0;
	unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;

	/* increment by blockDim.x*gridDim.x, so that a single thread will do all the
	 * "work" needed done on n, especially if n >= gridDim.x*blockDim.x = N_x*M_x */
	for (int i = k_x; i < L/4; i += blockDim.x * gridDim.x) {
		// vectorized 16-byte load: 4 ints at once
		int4 in = ((int4*) input)[i];
		sum += in.x + in.y + in.z + in.w;
	}
	// process remaining elements (at most 3 of them, indices L/4*4 .. L-1);
	// only threads with k_x < 3 can enter, and the += 4 step exits after one pass.
	for (unsigned int idx = k_x + L/4*4; idx < L; idx += 4) {
		sum += input[idx];
	}
	return sum;
};

/** @fn sum_kernel
 * 	@brief sum kernel, generalized for n not a power of 2
 * 	@details requires dynamic shared memory of blockDim.x * sizeof(int) bytes;
 * 	*sum must be zeroed by the caller before launch.
 */
__global__ void sum_kernel(int *sum, int *input, int L) {
	// for a particular thread k_x, we've obtained the
	// sum of input[k_x], input[k_x+1], ... input[k_x+3] in sum4
	int sum4 = thread_sum(input, L);

	extern __shared__ int temp[];
	auto g = cg::this_thread_block();
	int block_sum = reduce_sum(g, temp, sum4);

	// one atomic per block accumulates the global total
	if (g.thread_rank() == 0) {
		atomicAdd(sum, block_sum);
	}
};

int main(int argc, char* argv[]) {
	size_t MAXGRIDSIZE = get_maxGridSize();

	/* ***** (thread) grid,block dims ***** */
	/* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
	 * determined here */
	unsigned int M_x = 1<<6;	// M_x = number of threads in x-direction, in a single block, i.e. blocksize; 2^6 = 64
	unsigned int L = 1<<7;		// doesn't output correct values for n = 1<<30
	unsigned int MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
	// notice how we're only launching 1/4 of L threads
	unsigned int N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
	int sharedBytes = M_x * sizeof(int);
	/* ***** END of (thread) grid,block dims ***** */

	// setup input, output (managed memory, freed through the custom deleter)
	auto del_ints_lambda = [&](int* ptr) { cudaFree(ptr); };
	std::unique_ptr<int,decltype(del_ints_lambda)> sum(nullptr,del_ints_lambda);
	std::unique_ptr<int[],decltype(del_ints_lambda)> input(nullptr,del_ints_lambda);
	// NOTE(review): casting &unique_ptr to void** assumes unique_ptr with a
	// captureless-equivalent deleter is pointer-layout-compatible — works in
	// practice but is not guaranteed by the standard; verify.
	cudaMallocManaged((void**)&sum, sizeof(int)) ;
	cudaMallocManaged((void**)&input, L*sizeof(int));
	std::fill_n(input.get(),L,1);
	cudaMemset(sum.get(), 0,sizeof(int));

	sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input.get(),L);

	/* sanity check */
	// host output of sum (cudaMemcpy on the default stream also synchronizes with the kernel)
	std::unique_ptr<int> h_sum = std::make_unique<int>( 0 );
	cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

	std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<7 : " << (1<<7) << std::endl;

	/* ******************************************************* */
	/* ********** more tests of \sum_{i=1}^L 1 = L ********** */

	/* ***** L = 1<<8 = 2^8 = 256 test ***** */
	L = 1<< 8;
	// notice how we're only launching 1/4 of L threads
	N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

	std::unique_ptr<int[],decltype(del_ints_lambda)> input1(nullptr,del_ints_lambda);
	cudaMallocManaged((void**) &input1,L*sizeof(int));
	std::fill_n(input1.get(),L,1);
	cudaMemset(sum.get(), 0,sizeof(int));

	sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input1.get(),L);

	/* sanity check */
	// host output of sum
	cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

	std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<8 : " << (1<<8) << std::endl;

	/* ***** L = 1<<9 + 1= 2^9 + 1= 513 test ***** */
	L = (1<< 9)+1;
	// notice how we're only launching 1/4 of L threads
	N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

	std::unique_ptr<int[],decltype(del_ints_lambda)> input2(nullptr,del_ints_lambda);
	cudaMallocManaged((void**) &input2,L*sizeof(int));
	std::fill_n(input2.get(),L,1);
	cudaMemset(sum.get(), 0,sizeof(int));

	sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input2.get(),L);

	/* sanity check */
	// host output of sum
	cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

	std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<9) + 1 : " << ((1<<9)+1) << std::endl;

	/* ***** L = 1<<29 = 2^29 test ***** */
	{
		L = (1<< 29);
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input3(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input3,L*sizeof(int));
		std::fill_n(input3.get(),L,1);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input3.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29) : " << (1<<29) << std::endl;
	}

	/* ***** L = (1<<29) + 2 = (2^29 + 2) test ***** */
	{
		L = (1<< 29)+2;
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input4,L*sizeof(int));
		std::fill_n(input4.get(),L,1);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29)+2 : " << ((1<<29)+2) << std::endl;
	}

	/* ***** L = 1<<30 = 2^30 test ***** */
	{
		L = (1<< 30);
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input4,L*sizeof(int));
		std::fill_n(input4.get(),L,1);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30) : " << (1<<30) << std::endl;
	}

	/* ***** L = 1<<30 +3 = 2^30+3 test ***** */
	{
		L = (1<< 30)+3;
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input4,L*sizeof(int));
		std::fill_n(input4.get(),L,1);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30)+3 : " << ((1<<30)+3) << std::endl;
	}

	/* ********** END of more tests of \sum_{i=1}^L 1 = L ********** */

	/* ************************************************************ */
	/* ********** more tests of \sum_{i=1}^L i = L(L+1)/2 ********** */

	/* ***** L = 1<<15 = 2^15 test ***** */
	{
		L = (1<< 15);
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input5(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input5,L*sizeof(int));
		// fill with 1, 2, ..., L on the device
		inc_kernel<<< min((L+M_x-1)/M_x,MAX_BLOCKS), M_x>>>(input5.get(),1,L);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input5.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
	}

	/* ***** L = 1<<15 + 2 = 2^15 +2 test ***** */
	{
		L = (1<< 15) + 2;
		// notice how we're only launching 1/4 of L threads
		N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));

		std::unique_ptr<int[],decltype(del_ints_lambda)> input6(nullptr,del_ints_lambda);
		cudaMallocManaged((void**) &input6,L*sizeof(int));
		inc_kernel<<< min((L+M_x-1)/M_x,MAX_BLOCKS), M_x>>>(input6.get(),1,L);
		cudaMemset(sum.get(), 0,sizeof(int));	// reset the sum

		sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input6.get(),L);

		/* sanity check */
		// host output of sum
		cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);

		std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
	}
}
the_stack
#define CUDAGMM_VERSION 5

#if(CUDAGMM_VERSION == 5)

// NOTE(review): multi-statement macro without do{}while(0); only safe inside a
// braced block, which is how it is used below.
#define SWAP(a, b, t) t = (a); a = (b); b = (t)

// Per-pixel GMM state stored in device global memory (pointers live in
// __constant__ memory, see d_GMMData below).
typedef struct CvFastBgGMMData
{
	float4* ucGaussian;   // per mode: (mean R, mean G, mean B, variance)
	float* rWeight;       // per mode: mixture weight
	int* rnUsedModes;     // per pixel: number of modes currently in use
} CvFastBgGMMData;

enum ImageInfo
{
#if(CUDAGMM_VERSION >= 2)
	ImageInfoPixelCount = 0,
	ImageInfoPixelsPerThread = 1,
	ImageInfoCount = 2
#else
	ImageInfoInpWidth = 0,
	ImageInfoInpHeight = 1,
	ImageInfoInpWidthStep = 2,
	ImageInfoOutWidth = 3,
	ImageInfoOutHeight = 4,
	ImageInfoOutWidthStep = 5,
	ImageInfoPixelCount = 6,
	ImageInfoPixelsPerThread = 7,
	ImageInfoCount = 8
#endif
};

// algorithm parameters
typedef struct CvFastBgGMMParams
{
	/////////////////////////
	// very important parameters - things you will change
	////////////////////////
	float fAlphaT;
	// alpha - speed of update - if the time interval you want to average over is T
	// set alpha=1/T. It is also useful at start to make T slowly increase
	// from 1 until the desired T
	float fTb;
	// Tb - threshold on the squared Mahalan. dist. to decide if it is well described
	// by the background model or not. Related to Cthr from the paper.
	// This does not influence the update of the background. A typical value could be 4 sigma
	// and that is Tb=4*4=16;

	/////////////////////////
	// less important parameters - things you might change but be carefull
	////////////////////////
	float fTg;
	// Tg - threshold on the squared Mahalan. dist. to decide
	// when a sample is close to the existing components. If it is not close
	// to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
	// Smaller Tg leads to more generated components and higher Tg might make
	// lead to small number of components but they can grow too large
	float fTB; // 1-cf from the paper
	// TB - threshold when the component becomes significant enough to be included into
	// the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
	// For alpha=0.001 it means that the mode should exist for approximately 105 frames before
	// it is considered foreground
	float fSigma;
	// initial standard deviation for the newly generated components.
	// It will will influence the speed of adaptation. A good guess should be made.
	// A simple way is to estimate the typical standard deviation from the images.
	// I used here 10 as a reasonable value
	float fCT; // CT - complexity reduction prior
	// this is related to the number of samples needed to accept that a component
	// actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
	// the standard Stauffer&Grimson algorithm (maybe not exact but very similar)

	// even less important parameters
	int nM; // max number of modes - const - 4 is usually enough

	// shadow detection parameters
	int bShadowDetection; // do shadow detection
	float fTau;
	// Tau - shadow threshold. The shadow is detected if the pixel is darker
	// version of the background. Tau is a threshold on how much darker the shadow can be.
	// Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
	// See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.

	float fPrune; // =-m_fAlphaT*m_fCT;

	// data
	int nNBands; // only RGB now ==3
	int nWidth;  // image size
	int nHeight;
	int nSize;
	int bRemoveForeground;
} CvFastBgGMMParams;

// Read-only algorithm state broadcast to all threads via constant memory.
__constant__ CvFastBgGMMParams d_GMMParams;
__constant__ CvFastBgGMMData d_GMMData;
__constant__ int d_arrImageInfo[ImageInfoCount];

/*=======================================================================================*/
/// Updates the per-pixel Gaussian mixture with the new RGB sample and returns
/// 1 when the sample is explained by the background model, 0 otherwise.
/// Modes for a pixel are laid out strided by ImageInfoPixelCount and kept
/// sorted by descending weight.
__device__ int _cudaUpdateFastBgGMM(int pixel, float red, float green, float blue, int* pModesUsed )
{
	//calculate distances to the modes (+ sort)
	//here we need to go in descending order!!!
	int pos;
	bool bFitsPDF = 0;
	int bBackground = 0;

	float m_fOneMinAlpha = 1 - d_GMMParams.fAlphaT;
	int nModes = (*pModesUsed);
	float weight, totalWeight = 0.0f;
	float dR, dG, dB;
	float dist, k, sigmanew;

	//go through all modes
	for (int iModes = 0; iModes < nModes; iModes++)
	{
		pos = pixel + iModes*d_arrImageInfo[ImageInfoPixelCount];
		weight = d_GMMData.rWeight[pos];

		//fit not found yet
		if (!bFitsPDF)
		{
			//check if it belongs to some of the modes
			//calculate distance
			float4 cGauss = d_GMMData.ucGaussian[pos];
			dR = cGauss.x - red;
			dG = cGauss.y - green;
			dB = cGauss.z - blue;

			//check if it fits the current mode (Factor * sigma)

			//square distance -slower and less accurate
			//float maxDistance = cvSqrt(m_fTg*var);
			//if ((fabs(dR) <= maxDistance) && (fabs(dG) <= maxDistance) && (fabs(dB) <= maxDistance))
			//circle
			dist = dR*dR + dG*dG + dB*dB;

			//background? - m_fTb
			if ((totalWeight < d_GMMParams.fTB) && (dist < d_GMMParams.fTb * cGauss.w))
				bBackground = 1;

			//check fit
			if (dist < d_GMMParams.fTg * cGauss.w)
			{
				//belongs to the mode
				bFitsPDF = 1;

				//update distribution
				k = d_GMMParams.fAlphaT/weight;
				weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune;
				weight += d_GMMParams.fAlphaT;
				cGauss.x -= k*(dR);
				cGauss.y -= k*(dG);
				cGauss.z -= k*(dB);

				//limit update speed for cov matrice
				//not needed
				sigmanew = cGauss.w + k*(dist - cGauss.w);
				//limit the variance
				cGauss.w = sigmanew < 4 ? 4 : sigmanew > 5 * d_GMMParams.fSigma ? 5 * d_GMMParams.fSigma : sigmanew;
				d_GMMData.ucGaussian[pos] = cGauss;

				//sort
				//all other weights are at the same place and
				//only the matched (iModes) is higher -> just find the new place for it
				for (int iLocal = iModes; iLocal > 0; iLocal--)
				{
					int posLocal = pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount];
					if (weight < (d_GMMData.rWeight[posLocal-d_arrImageInfo[ImageInfoPixelCount]]))
					{
						break;
					}
					else
					{
						//swap
						float tmpVal;
						float4 tmpuChar;
						SWAP(d_GMMData.ucGaussian[posLocal], d_GMMData.ucGaussian[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpuChar);
						SWAP(d_GMMData.rWeight[posLocal], d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpVal);
					}
				}
				//belongs to the mode
			}
			else
			{
				weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune;
				//check prune
				if (weight < -(d_GMMParams.fPrune))
				{
					weight = 0.0f;
					nModes--;
					// bPrune=1;
					//break;//the components are sorted so we can skip the rest
				}
			}
			//check if it fits the current mode (2.5 sigma)
			///////
		}
		//fit not found yet
		else
		{
			weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune;
			if (weight < -(d_GMMParams.fPrune))
			{
				weight=0.0;
				nModes--;
				//bPrune=1;
				//break;//the components are sorted so we can skip the rest
			}
		}
		totalWeight += weight;
		d_GMMData.rWeight[pos] = weight;
	}
	//go through all modes
	//////

	//renormalize weights
	for (int iLocal = 0; iLocal < nModes; iLocal++)
	{
		d_GMMData.rWeight[pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]] /= totalWeight;
	}

	//make new mode if needed and exit
	if (!bFitsPDF)
	{
		if (nModes == d_GMMParams.nM)
		{
			//replace the weakest (the last, weight-sorted slot is overwritten below)
		}
		else
		{
			//add a new one
			//totalWeight+=m_fAlphaT;
			//pos++;
			nModes++;
		}
		pos = pixel + (nModes-1)*d_arrImageInfo[ImageInfoPixelCount];

		if (nModes == 1)
			d_GMMData.rWeight[pos] = 1;
		else
			d_GMMData.rWeight[pos] = d_GMMParams.fAlphaT;

		//renormalize weights
		for (int iLocal = 0; iLocal < nModes-1; iLocal++)
		{
			d_GMMData.rWeight[pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]] *= m_fOneMinAlpha;
		}

		// initialize the new mode at the current sample with the default variance
		float4 cGauss;
		cGauss.x = red;
		cGauss.y = green;
		cGauss.z = blue;
		cGauss.w = d_GMMParams.fSigma;
		d_GMMData.ucGaussian[pos] = cGauss;

		//sort
		//find the new place for it
		for (int iLocal = nModes - 1; iLocal>0; iLocal--)
		{
			int posLocal = pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount];
			if (d_GMMParams.fAlphaT < (d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]]))
			{
				break;
			}
			else
			{
				//swap
				float4 tmpuChar;
				float tmpVal;
				SWAP(d_GMMData.ucGaussian[posLocal], d_GMMData.ucGaussian[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpuChar);
				SWAP(d_GMMData.rWeight[posLocal], d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpVal);
			}
		}
	}

	//set the number of modes
	*pModesUsed=nModes;

	return bBackground;
}
/*=======================================================================================*/

/*=======================================================================================*/
/// Returns 2 when the (already foreground-classified) pixel looks like a
/// shadow of a background mode — darker version with small color distortion —
/// and 0 otherwise.
__device__ int _cudaRemoveShadowGMM(int pixel, float red, float green, float blue, int nModes)
{
	//calculate distances to the modes (+ sort)
	//here we need to go in descending order!!!
	// long posPixel = pixel * m_nM;
	int pos;
	float tWeight = 0;
	float numerator, denominator;

	// check all the distributions, marked as background:
	for (int iModes=0;iModes<nModes;iModes++)
	{
		pos=pixel+iModes*d_arrImageInfo[ImageInfoPixelCount];
		float4 cGauss = d_GMMData.ucGaussian[pos];
		float weight = d_GMMData.rWeight[pos];
		tWeight += weight;

		// 'a' is the brightness ratio of the sample projected onto the mode mean
		numerator = red * cGauss.x + green * cGauss.y + blue * cGauss.z;
		denominator = cGauss.x * cGauss.x + cGauss.y * cGauss.y + cGauss.z * cGauss.z;

		// no division by zero allowed
		if (denominator == 0)
		{
			break;
		}
		float a = numerator / denominator;

		// if tau < a < 1 then also check the color distortion
		if ((a <= 1) && (a >= d_GMMParams.fTau))//m_nBeta=1
		{
			float dR=a * cGauss.x - red;
			float dG=a * cGauss.y - green;
			float dB=a * cGauss.z - blue;

			//square distance -slower and less accurate
			//float maxDistance = cvSqrt(m_fTb*var);
			//if ((fabs(dR) <= maxDistance) && (fabs(dG) <= maxDistance) && (fabs(dB) <= maxDistance))
			//circle
			float dist=(dR*dR+dG*dG+dB*dB);
			if (dist<d_GMMParams.fTb*cGauss.w*a*a)
			{
				return 2;
			}
		}

		// stop once the accumulated weight covers the background portion
		if (tWeight > d_GMMParams.fTB)
		{
			break;
		}
	}
	return 0;
}
/*=======================================================================================*/

/*=======================================================================================*/
/// Writes the dominant mode's mean color into the (BGR-ordered) pixel.
__device__ void _cudaReplacePixelBackgroundGMM(int pixel, uchar4* pData)
{
	uchar4 tmp;
	float4 cGauss = d_GMMData.ucGaussian[pixel];
	// mean is stored as (R,G,B) but the frame stores (B,G,R) in .x/.y/.z
	tmp.z = (unsigned char) cGauss.x;
	tmp.y = (unsigned char) cGauss.y;
	tmp.x = (unsigned char) cGauss.z;
	(*pData) = tmp;
}
/*=======================================================================================*/

/*=======================================================================================*/
extern __shared__ int sharedInfo[];

/// Per-frame entry point: each block processes BLOCK_SIZE * PixelsPerThread
/// consecutive pixels; each thread strides by BLOCK_SIZE within that range.
/// Output per pixel: 255 = foreground, 0 = background, 128 = shadow.
/// Requires dynamic shared memory for sharedInfo (one int is used).
template <int BLOCK_SIZE>
__global__ void cudaUpdateFastBgGMM(unsigned char* data, unsigned char* output)
{
	if(threadIdx.x == 0)
	{
		// the start pixel for current block
		sharedInfo[0] = (blockIdx.x * BLOCK_SIZE)*d_arrImageInfo[ImageInfoPixelsPerThread];
	}
	__syncthreads();

	int iPxStart = sharedInfo[0] + threadIdx.x;
	int iPxEnd = min( d_arrImageInfo[ImageInfoPixelCount], sharedInfo[0] + (BLOCK_SIZE * d_arrImageInfo[ImageInfoPixelsPerThread]));
	uchar4* pGlobalInput = ((uchar4*)data) + iPxStart;
	unsigned char* pGlobalOutput = output + iPxStart;
	int* pUsedModes = d_GMMData.rnUsedModes + iPxStart;

	unsigned char fRed, fGreen, fBlue;
	uchar4 currentInputPx;

	for(int i = iPxStart; i < iPxEnd; i += BLOCK_SIZE)
	{
		// retrieves the color (input frame is BGRA/BGRX ordered)
		currentInputPx = *pGlobalInput;
		fBlue = currentInputPx.x;
		fGreen = currentInputPx.y;
		fRed = currentInputPx.z;
		pGlobalInput += BLOCK_SIZE;

		// update model + background subtract
		int result = _cudaUpdateFastBgGMM(i, fRed, fGreen, fBlue, pUsedModes);
		int nMLocal = *pUsedModes;
		pUsedModes += BLOCK_SIZE;

		if (d_GMMParams.bShadowDetection)
		{
			if (!result)
			{
				result= _cudaRemoveShadowGMM(i, fRed, fGreen, fBlue, nMLocal);
			}
		}

		switch (result)
		{
		case 0:
			//foreground
			(*pGlobalOutput) = 255;
			if (d_GMMParams.bRemoveForeground)
			{
				_cudaReplacePixelBackgroundGMM(i, pGlobalInput-BLOCK_SIZE);
			}
			break;
		case 1:
			//background
			(*pGlobalOutput) = 0;
			break;
		case 2:
			//shadow
			(*pGlobalOutput) = 128;
			if (d_GMMParams.bRemoveForeground)
			{
				_cudaReplacePixelBackgroundGMM(i, pGlobalInput-BLOCK_SIZE);
			}
			break;
		}
		pGlobalOutput += BLOCK_SIZE;
	}
}

#endif
the_stack
#include "./common/errors.h"

// errors.h supplies HANDLE_ERROR(): a convenience wrapper for checking CUDA
// runtime API results; can be wrapped around any runtime API call.

// Wave numbers of the cosine test function in each dimension.
float fx = 1.0f, fy = 1.0f, fz = 1.0f;
// Grid dimensions.
const int m_x = 64, m_y = 64, m_z = 64;

// shared memory tiles will be M*-by-*Pencils
// sPencils is used when each thread calculates the derivative at one point
// lPencils is used for coalescing in y and z where each thread has to
// calculate the derivative at multiple points
const int sPencils = 4;  // small # pencils
const int lPencils = 32; // large # pencils

// Launch configurations: grid/block[dimension][0] for sPencils kernels,
// grid/block[dimension][1] for lPencils kernels (filled in below).
dim3 grid[3][2], block[3][2];

// stencil coefficients
// __constant__ uses constant memory on device (GPU), FTW!
__constant__ float c_ax, c_bx, c_cx, c_dx;
__constant__ float c_ay, c_by, c_cy, c_dy;
__constant__ float c_az, c_bz, c_cz, c_dz;

// host routine to set constant data: uploads the 8th-order central-difference
// stencil weights to constant memory and fills the execution configurations.
void setDerivativeParameters()
{
  // check to make sure dimensions are integral multiples of sPencils
  if ((m_x % sPencils != 0) || (m_y % sPencils != 0) || (m_z % sPencils != 0)) {
    printf("'m_x', 'm_y', and 'm_z' must be integral multiples of sPencils\n");
    exit(1);
  }

  if ((m_x % lPencils != 0) || (m_y % lPencils != 0)) {
    printf("'m_x' and 'm_y' must be multiples of lPencils\n");
    exit(1);
  }

  // stencil weights (for unit length problem)
  float dsinv = m_x - 1.f;

  float ax = 4.f / 5.f * dsinv;
  float bx = -1.f / 5.f * dsinv;
  float cx = 4.f / 105.f * dsinv;
  float dx = -1.f / 280.f * dsinv;
  HANDLE_ERROR( cudaMemcpyToSymbol(c_ax, &ax, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_bx, &bx, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_cx, &cx, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_dx, &dx, sizeof(float), 0, cudaMemcpyHostToDevice) );

  dsinv = m_y - 1.f;

  float ay = 4.f / 5.f * dsinv;
  float by = -1.f / 5.f * dsinv;
  float cy = 4.f / 105.f * dsinv;
  float dy = -1.f / 280.f * dsinv;
  HANDLE_ERROR( cudaMemcpyToSymbol(c_ay, &ay, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_by, &by, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_cy, &cy, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_dy, &dy, sizeof(float), 0, cudaMemcpyHostToDevice) );

  dsinv = m_z - 1.f;

  float az = 4.f / 5.f * dsinv;
  float bz = -1.f / 5.f * dsinv;
  float cz = 4.f / 105.f * dsinv;
  float dz = -1.f / 280.f * dsinv;
  HANDLE_ERROR( cudaMemcpyToSymbol(c_az, &az, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_bz, &bz, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_cz, &cz, sizeof(float), 0, cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpyToSymbol(c_dz, &dz, sizeof(float), 0, cudaMemcpyHostToDevice) );

  // Execution configurations for small and large pencil tiles
  grid[0][0] = dim3(m_y / sPencils, m_z, 1);
  block[0][0] = dim3(m_x , sPencils, 1);

  grid[0][1] = dim3(m_y / lPencils, m_z, 1);
  block[0][1] = dim3(m_x , sPencils, 1);

  grid[1][0] = dim3(m_x / sPencils, m_z, 1);
  block[1][0] = dim3(sPencils , m_y, 1);

  grid[1][1] = dim3(m_x / lPencils, m_z, 1);
  // we want to use the same number of threads as above,
  // so when we use lPencils instead of sPencils in one
  // dimension, we multiply the other by sPencils/lPencils
  block[1][1] = dim3(lPencils, m_y * sPencils / lPencils, 1);

  grid[2][0] = dim3(m_x / sPencils, m_y, 1);
  block[2][0] = dim3(sPencils, m_z, 1);

  grid[2][1] = dim3(m_x / lPencils, m_y, 1);
  block[2][1] = dim3(lPencils, m_z * sPencils / lPencils, 1);
}

// Fill f with a cosine test function varying along dimension 'dim'
// (0 = x, 1 = y, 2 = z).
void initInput(float *f, int dim)
{
  const float twopi = 8.f * (float)atan(1.0);

  for (int k = 0; k < m_z; k++) {
    for (int j = 0; j < m_y; j++) {
      for (int i = 0; i < m_x; i++) {
        switch(dim) {
          case 0:
            f[k*m_x*m_y+j*m_x+i] = cos(fx*twopi*(i-1.f)/(m_x-1.f));
            break;
          case 1:
            f[k*m_x*m_y+j*m_x+i] = cos(fy*twopi*(j-1.f)/(m_y-1.f));
            break;
          case 2:
            f[k*m_x*m_y+j*m_x+i] = cos(fz*twopi*(k-1.f)/(m_z-1.f));
            break;
        }
      }
    }
  }
}

// Fill sol with the analytic derivative of the test function along 'dim'.
void initSol(float *sol, int
dim) {
  // initSol (continued): analytic derivative of the cosine test function,
  // used as the reference solution for the finite-difference kernels.
  const float twopi = 8.f * (float)atan(1.0);

  for (int k = 0; k < m_z; k++) {
    for (int j = 0; j < m_y; j++) {
      for (int i = 0; i < m_x; i++) {
        switch(dim) {
          case 0:
            sol[k*m_x*m_y+j*m_x+i] = -fx*twopi*sin(fx*twopi*(i-1.f)/(m_x-1.f));
            break;
          case 1:
            sol[k*m_x*m_y+j*m_x+i] = -fy*twopi*sin(fy*twopi*(j-1.f)/(m_y-1.f));
            break;
          case 2:
            sol[k*m_x*m_y+j*m_x+i] = -fz*twopi*sin(fz*twopi*(k-1.f)/(m_z-1.f));
            break;
        }
      }
    }
  }
}

// Compute RMS and max absolute error between the analytic solution 'sol'
// and the computed derivative 'df' over the full m_x*m_y*m_z grid.
void checkResults(double &error, double &maxError, float *sol, float *df)
{
  // error = sqrt(sum((sol-df)**2)/(M_x*M_y*M_z))
  // maxError = maxval(abs(sol-df))
  maxError = 0;
  error = 0;
  for (int k = 0; k < m_z; k++) {
    for (int j = 0; j < m_y; j++) {
      for (int i = 0; i < m_x; i++) {
        float s = sol[k*m_x*m_y+j*m_x+i];
        float f = df[k*m_x*m_y+j*m_x+i];
        // printf("%d %d %d: %f %f\n", i, j, k, s, f);
        error += (s-f)*(s-f);
        if (fabs(s-f) > maxError) maxError = fabs(s-f);
      }
    }
  }
  error = sqrt(error / (m_x*m_y*m_z));
}

// ---------------
// x derivatives
// ---------------

// 8th-order central-difference x derivative using an sPencils-wide shared
// memory tile with a 4-point periodic halo on each side.
// Launch: grid (m_y/sPencils, m_z), block (m_x, sPencils).
__global__ void derivative_x(float *f, float *df)
{
  __shared__ float s_f[sPencils][m_x+8]; // 4-wide halo

  int i = threadIdx.x;
  int j = blockIdx.x*blockDim.y + threadIdx.y;
  int k = blockIdx.y;
  int si = i + 4;       // local i for shared memory access + halo offset
  int sj = threadIdx.y; // local j for shared memory access

  int globalIdx = k * m_x * m_y + j * m_x + i;

  s_f[sj][si] = f[globalIdx];

  __syncthreads();

  // fill in periodic images in shared memory array
  if (i < 4) {
    s_f[sj][si-4]   = s_f[sj][si+m_x-5];
    s_f[sj][si+m_x] = s_f[sj][si+1];
  }

  __syncthreads();

  // BUG FIX: the four stencil terms must be SUMMED ('+'), not multiplied
  // ('*') as the original code did -- matching derivative_x_lPencils and
  // the y/z kernels, which all accumulate the weighted differences.
  df[globalIdx] =
    ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] )
    + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] )
    + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] )
    + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) );
}

// this version uses a 64x32 shared memory tile.
// still with 64*sPencils threads: each thread loads/derives multiple rows
// of the lPencils-wide tile by striding in sj.
__global__ void derivative_x_lPencils(float *f, float *df)
{
  __shared__ float s_f[lPencils][m_x+8]; // 4-wide halo

  int i = threadIdx.x;
  int jBase = blockIdx.x*lPencils;
  int k = blockIdx.y;
  int si = i + 4; // local i for shared memory access + halo offset

  for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
    int globalIdx = k * m_x * m_y + (jBase + sj) * m_x + i;
    s_f[sj][si] = f[globalIdx];
  }

  __syncthreads();

  // fill in periodic images in shared memory array
  if (i < 4) {
    for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
      s_f[sj][si-4]   = s_f[sj][si+m_x-5];
      s_f[sj][si+m_x] = s_f[sj][si+1];
    }
  }

  __syncthreads();

  for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
    int globalIdx = k * m_x * m_y + (jBase + sj) * m_x + i ;
    df[globalIdx] =
      ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] )
      + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] )
      + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] )
      + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) );
  }
}

// -------------
// y derivatives
// -------------

// y derivative with an sPencils-wide tile; the shared tile is transposed
// ([m_y+8][sPencils]) relative to the x kernels so the global loads coalesce.
__global__ void derivative_y(float *f, float *df)
{
  __shared__ float s_f[m_y+8][sPencils];

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = threadIdx.y;
  int k = blockIdx.y;
  int si = threadIdx.x;
  int sj = j + 4; // halo offset

  int globalIdx = k * m_x * m_y + j * m_x + i;

  s_f[sj][si] = f[globalIdx];

  __syncthreads();

  // fill in periodic images in shared memory array
  if (j < 4) {
    s_f[sj-4][si]   = s_f[sj+m_y-5][si];
    s_f[sj+m_y][si] = s_f[sj+1][si];
  }

  __syncthreads();

  df[globalIdx] =
    ( c_ay * ( s_f[sj+1][si] - s_f[sj-1][si] )
    + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] )
    + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] )
    + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) );
}

// y derivative using a tile of 32x64,
// launch with thread block of 32x8
__global__ void derivative_y_lPencils(float *f, float *df)
{
  __shared__ float s_f[m_y+8][lPencils];

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int k = blockIdx.y;
  int si = threadIdx.x;

  for (int j = threadIdx.y; j < m_y; j += blockDim.y) {
    int globalIdx = k * m_x * m_y + j * m_x + i;
    int sj = j + 4;
    s_f[sj][si] = f[globalIdx];
  }

  __syncthreads();

  // only the first 4 rows of threads (sj in [4,8)) fill the periodic halo
  int sj = threadIdx.y + 4;
  if (sj < 8) {
    s_f[sj-4][si]   = s_f[sj+m_y-5][si];
    s_f[sj+m_y][si] = s_f[sj+1][si];
  }

  __syncthreads();

  for (int j = threadIdx.y; j < m_y; j += blockDim.y) {
    int globalIdx = k * m_x * m_y + j * m_x + i;
    int sj = j + 4;
    df[globalIdx] =
      ( c_ay * ( s_f[sj+1][si] - s_f[sj-1][si] )
      + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] )
      + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] )
      + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) );
  }
}

// ------------
// z derivative
// ------------

// z derivative, sPencils variant (same transposed-tile trick as derivative_y).
__global__ void derivative_z(float *f, float *df)
{
  __shared__ float s_f[m_z+8][sPencils];

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y;
  int k = threadIdx.y;
  int si = threadIdx.x;
  int sk = k + 4; // halo offset

  int globalIdx = k * m_x * m_y + j * m_x + i;

  s_f[sk][si] = f[globalIdx];

  __syncthreads();

  // fill in periodic images in shared memory array
  if (k < 4) {
    s_f[sk-4][si]   = s_f[sk+m_z-5][si];
    s_f[sk+m_z][si] = s_f[sk+1][si];
  }

  __syncthreads();

  df[globalIdx] =
    ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] )
    + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] )
    + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] )
    + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) );
}

// z derivative, lPencils variant: each thread strides over k.
__global__ void derivative_z_lPencils(float *f, float *df)
{
  __shared__ float s_f[m_z+8][lPencils];

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j = blockIdx.y;
  int si = threadIdx.x;

  for (int k = threadIdx.y; k < m_z; k += blockDim.y) {
    int globalIdx = k * m_x * m_y + j * m_x + i;
    int sk = k + 4;
    s_f[sk][si] = f[globalIdx];
  }

  __syncthreads();

  // only the first 4 rows of threads fill the periodic halo
  int k = threadIdx.y + 4;
  if (k < 8) {
    s_f[k-4][si]   = s_f[k+m_z-5][si];
    s_f[k+m_z][si] = s_f[k+1][si];
  }

  __syncthreads();

  for (int k = threadIdx.y; k < m_z; k += blockDim.y) {
    int globalIdx = k * m_x * m_y + j * m_x + i;
    int sk = k +4;
    df[globalIdx] =
      ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] )
      + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] )
      + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] )
      + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) );
  }
}

// Run the kernels for a given dimension.
// One for sPencils, one for lPencils.
// Runs both kernel variants for the given dimension (0=x, 1=y, 2=z),
// times nReps launches, and reports RMS/max error and effective bandwidth.
void runTest(int dimension)
{
  void (*fpDeriv[2])(float*, float*);

  switch(dimension) {
    case 0:
      fpDeriv[0] = derivative_x;
      fpDeriv[1] = derivative_x_lPencils;
      break;
    case 1:
      fpDeriv[0] = derivative_y;
      fpDeriv[1] = derivative_y_lPencils;
      break;
    case 2:
      fpDeriv[0] = derivative_z;
      fpDeriv[1] = derivative_z_lPencils;
      break;
  }

  // tile sizes reported below, indexed [dimension][variant][axis]
  // (brace elision: flat initializer fills the 3x2x2 array in order)
  int sharedDims[3][2][2] = { m_x, sPencils,
                              m_x, lPencils,
                              sPencils, m_y,
                              lPencils, m_y,
                              sPencils, m_z,
                              lPencils, m_z };

  float f[m_x*m_y*m_z];
  float df[m_x*m_y*m_z];
  float sol[m_x*m_y*m_z];

  initInput(f, dimension);
  initSol(sol, dimension);

  // device arrays
  int bytes = m_x*m_y*m_z * sizeof(float);
  float *d_f, *d_df;
  HANDLE_ERROR( cudaMalloc((void**)&d_f, bytes) );
  HANDLE_ERROR( cudaMalloc((void**)&d_df, bytes) );

  const int nReps = 20;
  float milliseconds;
  cudaEvent_t startEvent, stopEvent;
  HANDLE_ERROR( cudaEventCreate(&startEvent) );
  HANDLE_ERROR( cudaEventCreate(&stopEvent) );

  double error, maxError;

  // 0x58 is 'X'; dimension offsets it to X/Y/Z
  printf("%c derivatives\n\n", (char)(0x58 + dimension));

  for (int fp = 0; fp < 2; fp++) {
    HANDLE_ERROR( cudaMemcpy(d_f, f, bytes, cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemset(d_df, 0, bytes) );

    // warm-up launch (excluded from timing)
    fpDeriv[fp]<<<grid[dimension][fp], block[dimension][fp]>>>(d_f, d_df);
    HANDLE_ERROR( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < nReps; i++)
      fpDeriv[fp]<<<grid[dimension][fp],block[dimension][fp]>>>(d_f, d_df);
    HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
    HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
    HANDLE_ERROR( cudaEventElapsedTime(&milliseconds, startEvent, stopEvent) );

    HANDLE_ERROR( cudaMemcpy(df, d_df, bytes, cudaMemcpyDeviceToHost) );

    checkResults(error, maxError, sol, df);

    printf(" Using shared memory tile of %d x %d\n", sharedDims[dimension][fp][0], sharedDims[dimension][fp][1]);
    printf(" RMS error: %e\n", error);
    printf(" MAX error: %e\n", maxError);
    printf(" Average time (ms): %f\n", milliseconds / nReps);
    // 2x: one read + one write of the whole grid per launch
    printf(" Average Bandwidth (GB/s): %f\n\n", 2.f * 1e-6 * m_x * m_y * m_z * nReps * sizeof(float) / milliseconds);
  }

  HANDLE_ERROR( cudaEventDestroy(startEvent) );
  HANDLE_ERROR( cudaEventDestroy(stopEvent) );
  HANDLE_ERROR( cudaFree(d_f) );
  HANDLE_ERROR( cudaFree(d_df) );
}

// This is the main host code for the finite difference
// example. The kernels are contained in the derivative_m module
int main(void)
{
  // Print device and precision
  cudaDeviceProp prop;
  HANDLE_ERROR( cudaGetDeviceProperties(&prop, 0) );
  printf("\nDevice name: %s\n", prop.name);
  printf("Compute Capability: %d.%d\n\n", prop.major, prop.minor);

  setDerivativeParameters(); // initialize

  runTest(0); // x derivative
  runTest(1); // y derivative
  runTest(2); // z derivative

  return 0;
}
the_stack
#include <thrust/device_vector.h>
#include <thrust/iterator/discard_iterator.h>

#include <nvbench/nvbench.cuh>

#include <cuco/static_multimap.cuh>

#include <key_generator.hpp>

namespace {

// Custom pair equal: two pairs match when their keys match (values ignored).
template <typename Key, typename Value>
struct pair_equal {
  __device__ bool operator()(const cuco::pair_type<Key, Value>& lhs,
                             const cuco::pair_type<Key, Value>& rhs) const
  {
    return lhs.first == rhs.first;
  }
};

}  // anonymous namespace

/**
 * @brief A benchmark evaluating multi-value `insert` performance:
 * - Total number of insertions: 100'000'000
 * - CG size: 8
 */
template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) == sizeof(Value)), void> nvbench_static_multimap_insert(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  auto const num_keys  = state.get_int64("NumInputs");
  auto const occupancy = state.get_float64("Occupancy");

  std::size_t const size = num_keys / occupancy;

  std::vector<Key> h_keys(num_keys);
  std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys);

  generate_keys<Dist, Multiplicity, Key>(h_keys.begin(), h_keys.end());

  // pairs are (key, key): the value duplicates the key
  for (auto i = 0; i < num_keys; ++i) {
    Key key           = h_keys[i];
    Value val         = h_keys[i];
    h_pairs[i].first  = key;
    h_pairs[i].second = val;
  }

  thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs);

  state.add_element_count(num_keys, "NumKeys");

  state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
             [&](nvbench::launch& launch, auto& timer) {
               // map construction is excluded from the timed region
               cuco::static_multimap<Key, Value> map{size, -1, -1};

               // Use timers to explicitly mark the target region
               timer.start();
               map.insert(d_pairs.begin(), d_pairs.end(), launch.get_stream());
               timer.stop();
             });
}

template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) != sizeof(Value)), void> nvbench_static_multimap_insert(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  state.skip("Key should be the same type as Value.");
}

/**
 * @brief A benchmark evaluating multi-value `count` performance:
 * - Total number of insertions: 100'000'000
 * - CG size: 8
 */
template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) == sizeof(Value)), void> nvbench_static_multimap_count(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  auto const num_keys      = state.get_int64("NumInputs");
  auto const occupancy     = state.get_float64("Occupancy");
  auto const matching_rate = state.get_float64("MatchingRate");

  std::size_t const size = num_keys / occupancy;

  std::vector<Key> h_keys(num_keys);
  std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys);

  generate_keys<Dist, Multiplicity, Key>(h_keys.begin(), h_keys.end());

  for (auto i = 0; i < num_keys; ++i) {
    Key key           = h_keys[i];
    Value val         = h_keys[i];
    h_pairs[i].first  = key;
    h_pairs[i].second = val;
  }

  // perturb h_keys so only 'matching_rate' of the probes hit the map
  generate_probe_keys<Key>(matching_rate, h_keys.begin(), h_keys.end());

  thrust::device_vector<Key> d_keys(h_keys);
  thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs);

  state.add_element_count(num_keys, "NumKeys");

  cuco::static_multimap<Key, Value> map{size, -1, -1};
  map.insert(d_pairs.begin(), d_pairs.end());

  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
    auto count = map.count(d_keys.begin(), d_keys.end(), launch.get_stream());
  });
}

template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) != sizeof(Value)), void> nvbench_static_multimap_count(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  state.skip("Key should be the same type as Value.");
}

/**
 * @brief A benchmark evaluating multi-value `retrieve` performance:
 * - Total number of insertions: 100'000'000
 * - CG size: 8
 */
template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) == sizeof(Value)), void> nvbench_static_multimap_retrieve(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  auto const num_keys      = state.get_int64("NumInputs");
  auto const occupancy     = state.get_float64("Occupancy");
  auto const matching_rate = state.get_float64("MatchingRate");

  std::size_t const size = num_keys / occupancy;

  std::vector<Key> h_keys(num_keys);
  std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys);

  generate_keys<Dist, Multiplicity, Key>(h_keys.begin(), h_keys.end());

  for (auto i = 0; i < num_keys; ++i) {
    Key key           = h_keys[i];
    Value val         = h_keys[i];
    h_pairs[i].first  = key;
    h_pairs[i].second = val;
  }

  generate_probe_keys<Key>(matching_rate, h_keys.begin(), h_keys.end());

  thrust::device_vector<Key> d_keys(h_keys);
  thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs);

  state.add_element_count(num_keys, "NumKeys");

  cuco::static_multimap<Key, Value> map{size, -1, -1};
  map.insert(d_pairs.begin(), d_pairs.end());

  // size the output buffer once, outside the timed region
  auto const output_size = map.count_outer(d_keys.begin(), d_keys.end());
  thrust::device_vector<cuco::pair_type<Key, Value>> d_results(output_size);

  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
    map.retrieve_outer(d_keys.begin(), d_keys.end(), d_results.data().get(), launch.get_stream());
  });
}

template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) != sizeof(Value)), void> nvbench_static_multimap_retrieve(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  state.skip("Key should be the same type as Value.");
}

/**
 * @brief A benchmark evaluating multi-value query (`count` + `retrieve`) performance:
 * - Total number of insertions: 100'000'000
 * - CG size: 8
 */
template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) == sizeof(Value)), void> nvbench_static_multimap_query(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  auto const num_keys      = state.get_int64("NumInputs");
  auto const occupancy     = state.get_float64("Occupancy");
  auto const matching_rate = state.get_float64("MatchingRate");

  std::size_t const size = num_keys / occupancy;

  std::vector<Key> h_keys(num_keys);
  std::vector<cuco::pair_type<Key, Value>> h_pairs(num_keys);

  generate_keys<Dist, Multiplicity, Key>(h_keys.begin(), h_keys.end());

  for (auto i = 0; i < num_keys; ++i) {
    Key key           = h_keys[i];
    Value val         = h_keys[i];
    h_pairs[i].first  = key;
    h_pairs[i].second = val;
  }

  generate_probe_keys<Key>(matching_rate, h_keys.begin(), h_keys.end());

  thrust::device_vector<Key> d_keys(h_keys);
  thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs);

  state.add_element_count(num_keys, "NumKeys");

  cuco::static_multimap<Key, Value> map{size, -1, -1};
  map.insert(d_pairs.begin(), d_pairs.end());

  auto const output_size = map.count_outer(d_keys.begin(), d_keys.end());
  thrust::device_vector<cuco::pair_type<Key, Value>> d_results(output_size);

  // the timed region includes both the count and the retrieve
  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
    auto count = map.count_outer(d_keys.begin(), d_keys.end(), launch.get_stream());
    map.retrieve_outer(d_keys.begin(), d_keys.end(), d_results.data().get(), launch.get_stream());
  });
}

template <typename Key, typename Value, dist_type Dist, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) != sizeof(Value)), void> nvbench_static_multimap_query(
  nvbench::state& state,
  nvbench::type_list<Key, Value, nvbench::enum_type<Dist>, nvbench::enum_type<Multiplicity>>)
{
  state.skip("Key should be the same type as Value.");
}

/**
 * @brief A benchmark evaluating `pair_retrieve` performance:
 * - CG size: 8
 */
template <typename Key, typename Value, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) == sizeof(Value)), void> nvbench_static_multimap_pair_retrieve(
  nvbench::state& state, nvbench::type_list<Key, Value, nvbench::enum_type<Multiplicity>>)
{
  auto constexpr matching_rate = 0.5;
  auto constexpr occupancy     = 0.5;
  auto constexpr dist          = dist_type::UNIFORM;

  auto const num_input = state.get_int64("NumInputs");

  std::size_t const size = num_input / occupancy;

  std::vector<Key> h_keys(num_input);
  std::vector<cuco::pair_type<Key, Value>> h_pairs(num_input);

  generate_keys<dist, Multiplicity, Key>(h_keys.begin(), h_keys.end());

  for (auto i = 0; i < num_input; ++i) {
    Key key           = h_keys[i];
    Value val         = h_keys[i];
    h_pairs[i].first  = key;
    h_pairs[i].second = val;
  }

  thrust::device_vector<cuco::pair_type<Key, Value>> d_pairs(h_pairs);
  auto const pair_begin = d_pairs.begin();

  cuco::static_multimap<Key, Value> map{size, -1, -1};
  map.insert(pair_begin, pair_begin + num_input);

  generate_probe_keys<Key>(matching_rate, h_keys.begin(), h_keys.end());
  thrust::device_vector<Key> d_keys(h_keys);

  // reuse d_pairs as the probe-pair buffer: (key, key) for each probe key
  thrust::transform(
    thrust::device, d_keys.begin(), d_keys.begin() + num_input, pair_begin, [] __device__(Key i) {
      return cuco::pair_type<Key, Value>{i, i};
    });

  state.add_element_count(num_input, "NumInputs");

  auto const output_size = map.pair_count(pair_begin, pair_begin + num_input, pair_equal<Key, Value>{});
  thrust::device_vector<cuco::pair_type<Key, Value>> d_results(output_size);

  // retrieved pairs are discarded: we only measure the retrieval itself
  auto out1_begin = thrust::make_zip_iterator(
    thrust::make_tuple(thrust::make_discard_iterator(), thrust::make_discard_iterator()));
  auto out2_begin = thrust::make_zip_iterator(
    thrust::make_tuple(thrust::make_discard_iterator(), thrust::make_discard_iterator()));

  state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
    auto [out1_end, out2_end] = map.pair_retrieve(
      pair_begin, pair_begin + num_input, out1_begin, out2_begin, pair_equal<Key, Value>{});
  });
}

template <typename Key, typename Value, nvbench::int32_t Multiplicity>
std::enable_if_t<(sizeof(Key) != sizeof(Value)), void> nvbench_static_multimap_pair_retrieve(
  nvbench::state& state, nvbench::type_list<Key, Value, nvbench::enum_type<Multiplicity>>)
{
  state.skip("Key should be the same type as Value.");
}

using key_type   = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>;
using value_type = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>;
using d_type =
  nvbench::enum_type_list<dist_type::GAUSSIAN, dist_type::GEOMETRIC, dist_type::UNIFORM>;
using multiplicity = nvbench::enum_type_list<1, 2, 4, 8, 16, 32, 64, 128, 256>;

// BUG FIX: all benchmark names below were misspelled "staic_multimap_*";
// corrected to "static_multimap_*".
NVBENCH_BENCH_TYPES(nvbench_static_multimap_insert,
                    NVBENCH_TYPE_AXES(key_type,
                                      value_type,
                                      nvbench::enum_type_list<dist_type::UNIFORM>,
                                      multiplicity))
  .set_name("static_multimap_insert_uniform_multiplicity")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_insert,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_insert_occupancy")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", nvbench::range(0.1, 0.9, 0.1));

NVBENCH_BENCH_TYPES(nvbench_static_multimap_count,
                    NVBENCH_TYPE_AXES(key_type,
                                      value_type,
                                      nvbench::enum_type_list<dist_type::UNIFORM>,
                                      multiplicity))
  .set_name("static_multimap_count_uniform_multiplicity")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_count,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_count_occupancy")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", nvbench::range(0.1, 0.9, 0.1))
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_count,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_count_matching_rate")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_retrieve,
                    NVBENCH_TYPE_AXES(key_type,
                                      value_type,
                                      nvbench::enum_type_list<dist_type::UNIFORM>,
                                      multiplicity))
  .set_name("static_multimap_retrieve_uniform_multiplicity")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_retrieve,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_retrieve_occupancy")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", nvbench::range(0.1, 0.9, 0.1))
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_retrieve,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_retrieve_matching_rate")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_query,
                    NVBENCH_TYPE_AXES(key_type,
                                      value_type,
                                      nvbench::enum_type_list<dist_type::UNIFORM>,
                                      multiplicity))
  .set_name("static_multimap_query_uniform_multiplicity")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_query,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_query_occupancy")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", nvbench::range(0.1, 0.9, 0.1))
  .add_float64_axis("MatchingRate", {0.5});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_query,
                    NVBENCH_TYPE_AXES(key_type, value_type, d_type, nvbench::enum_type_list<8>))
  .set_name("static_multimap_query_matching_rate")
  .set_type_axes_names({"Key", "Value", "Distribution", "Multiplicity"})
  .set_timeout(100)                            // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)                            // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {100'000'000})  // Total number of key/value pairs: 100'000'000
  .add_float64_axis("Occupancy", {0.8})
  .add_float64_axis("MatchingRate", {0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1});

NVBENCH_BENCH_TYPES(nvbench_static_multimap_pair_retrieve,
                    NVBENCH_TYPE_AXES(key_type, value_type, multiplicity))
  .set_name("static_multimap_pair_retrieve_uniform_multiplicity")
  .set_type_axes_names({"Key", "Value", "Multiplicity"})
  .set_timeout(100)  // Custom timeout: 100 s. Default is 15 s.
  .set_max_noise(3)  // Custom noise: 3%. By default: 0.5%.
  .add_int64_axis("NumInputs", {1'000, 100'000, 1'000'000, 10'000'000, 100'000'000});  // Total number of key/value pairs: 100'000'000
the_stack
#include <iostream> #include <fstream> #include <cmath> #include <cstdlib> #include <cstdio> #include <cstring> #include <mpi.h> #define CUDA #ifdef OMP #error Sorry, OpenMP is currently disabled. #define THRUST_DEVICE_SYSTEM THRUST_DEVICE_BACKEND_OMP #undef CUDA #define PARALLEL_GET_TID omp_get_thread_num() #define PARALLEL_ADVANCE omp_get_num_threads() #define __global__ // #include <complex> #else #define PARALLEL_GET_TID threadIdx.x + blockIdx.x * blockDim.x #define PARALLEL_ADVANCE blockDim.x * gridDim.x // #include "cuda_complex.hpp" #endif // #define complex complex<Real> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/partition.h> #include <thrust/inner_product.h> #include "common.hpp" #include "io.hpp" #include "ic.hpp" #include "integrate.hpp" #ifdef MEX #define method mex #include "mex.hpp" #elif defined(SCF) #define method scf #include "scf.hpp" #endif using namespace std; using namespace etics; // GLOBAL VARIABLES int MyRank, NumProcs; Real ConstantStep = 0.001953125; Real T, Step, dT1, dT2, Tcrit, FileTime; int NSteps = 0, FileSnapshotNum; struct ReorderingFunctor { __host__ __device__ bool operator() (const Particle &lhs, const Particle &rhs) { return (lhs.ID <= rhs.ID); } }; Real CalculateStepSize() { return ConstantStep; } void DisplayInformation(Integrator IntegratorObj) { Real Ek = IntegratorObj.KineticEnergy(); Real Ep = IntegratorObj.PotentialEnergy(); Real Energy = Ek + Ep; Real TotalEnergy; MPI_Reduce(&Energy, &TotalEnergy, 1, MPI_ETICS_REAL, MPI_SUM, 0, MPI_COMM_WORLD); int N=IntegratorObj.GetN(), TotalN; MPI_Reduce(&N, &TotalN, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); if (MyRank==0) { printf(" TIME =%6.2f NSTEPS =%6d ENERGY =%20.16f N = %d\n", T, NSteps, TotalEnergy, TotalN); fflush(stdout); } } void PrepareSnapshot(Integrator IntegratorObj, Particle **ParticleList, int *CurrentTotalN) { Particle *LocalList; int LocalBufferSize; IntegratorObj.CopyParticlesToHost(&LocalList, 
&LocalBufferSize); LocalBufferSize *= sizeof(Particle); int BufferSizes[NumProcs]; MPI_Gather(&LocalBufferSize, 1, MPI_INT, BufferSizes, 1, MPI_INT, 0, MPI_COMM_WORLD); int Displacements[NumProcs]; int TotalN = 0; if (MyRank==0) { for (int p = 0; p < NumProcs; p++) TotalN += BufferSizes[p]/sizeof(Particle); Displacements[0] = 0; for (int p = 1; p < NumProcs; p++) Displacements[p] = Displacements[p-1] + BufferSizes[p-1]; *ParticleList = new Particle[TotalN]; } MPI_Gatherv(LocalList, LocalBufferSize, MPI_BYTE, *ParticleList, BufferSizes, Displacements, MPI_BYTE, 0, MPI_COMM_WORLD); #ifdef MEX thrust::sort(*ParticleList, (*ParticleList)+TotalN, ReorderingFunctor()); #endif *CurrentTotalN = TotalN; } int main(int argc, char *argv[]) { MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &MyRank); MPI_Comm_size(MPI_COMM_WORLD, &NumProcs); if (MyRank==0) { cerr << "Welcome to ETICS..." << endl; #ifdef MEX cerr << "Using method: MEX" << endl; cerr << "LMAX=" << LMAX << endl; #elif defined(SCF) cerr << "Using method: SCF" << endl; cerr << "LMAX=" << LMAX << endl; cerr << "NMAX=" << NMAX << endl; #endif } string Filename; int DeviceID = 0; ParametersStruct Params; // Instead of reading the input file with MyRank=0 and broadcast the result, we let every rank read the file. This probably saves ~20 lines of ugly MPI code. ParseInput(argc, argv, &Params); int N = Params.N; // total; will be divided by number of processes Filename = Params.Filename; Tcrit = Params.Tcrit; ConstantStep = Params.ConstantStep; DeviceID = Params.DeviceID; dT1 = Params.dT1; dT2 = Params.dT2; if (DeviceID >= 0) { if (cudaSetDevice(DeviceID) != cudaSuccess) { cerr << "Problem opening device (ID=" << DeviceID << ")" << endl; exit(1); } } else { cerr << "Skipping call to cudaSetDevice." << endl; } // Read an input file and initialize the global particle structure. Particle *FullList; if (MyRank==0) { if ((Filename == "_nofile_") || (Filename == "_hernquist_")) { cout << "Generating a Hernquist sphere..." 
<< endl; etics::ic::hernquist(N, Params.Seed, &FullList); FileSnapshotNum = 0; FileTime = 0; cout << "Done." << endl; } else if (Filename == "_plummer_") { cout << "Generating a Plummer sphere..." << endl; etics::ic::plummer(N, Params.Seed, &FullList); FileSnapshotNum = 0; FileTime = 0; cout << "Done." << endl; } else { string InputFileSuffix = Filename.substr(Filename.find_last_of("."), Filename.length()-Filename.find_last_of(".")); if ((InputFileSuffix==".h5part") || (InputFileSuffix==".hdf5") || (InputFileSuffix==".h5")) { #ifndef ETICS_HDF5 cerr << "Compiled without the \"ETICS_HDF5\" flag; cannot read input in this format." << endl; exit(1); #else ReadICsHDF5(Filename, N, &FullList, &FileSnapshotNum, &FileTime); #endif } else ReadICsASCII(Filename, N, &FullList, &FileSnapshotNum, &FileTime); } } #ifndef ETICS_HDF5 if (Params.OutputFormat == "hdf5") { cerr << "Compiled without the \"ETICS_HDF5\" flag; cannot output in requested format." << endl; exit(1); } #endif if (!(Params.OutputFormat == "hdf5") && !(Params.OutputFormat == "ascii")) { cerr << "Requested output format unrecognized." 
<< endl; exit(1); } int LocalN = N / NumProcs; int Remainder = N - LocalN*NumProcs; if (MyRank==NumProcs-1) LocalN += Remainder; Particle *LocalList = new Particle[LocalN]; int BufferSizes[NumProcs]; int Displacements[NumProcs]; if (MyRank==0) { for (int p = 0; p < NumProcs; p++) BufferSizes[p] = (N / NumProcs)*sizeof(Particle); BufferSizes[NumProcs-1] += Remainder*sizeof(Particle); Displacements[0] = 0; for (int p = 1; p < NumProcs; p++) Displacements[p] = Displacements[p-1] + BufferSizes[p-1]; } MPI_Scatterv(FullList, BufferSizes, Displacements, MPI_BYTE, LocalList, LocalN*sizeof(Particle), MPI_BYTE, 0, MPI_COMM_WORLD); if (MyRank==0) free(FullList); N = LocalN; // Here we ask each MPI process to report cudaDeviceProp DeviceProperties; const int etics_str_len = 256; cudaGetDeviceProperties(&DeviceProperties, 0); char ProcessorName[etics_str_len]; int tmp; MPI_Get_processor_name(ProcessorName, &tmp); char UniqueDeviceID[etics_str_len]; sprintf(UniqueDeviceID, "%d$$$%s", DeviceProperties.pciBusID, ProcessorName); char Message[etics_str_len]; sprintf(Message, "Hello from rank %d (of %d) on %s, using \"%s\" with PCI bus ID %d; this rank has %d particles.\n", MyRank, NumProcs, ProcessorName, DeviceProperties.name, DeviceProperties.pciBusID, LocalN); if (MyRank == 0) { printf(Message); fflush(stdout); for (int Rank = 1; Rank < NumProcs; Rank++) { MPI_Recv(Message, etics_str_len, MPI_CHAR, Rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); printf(Message); fflush(stdout); } } else { MPI_Send(Message, etics_str_len, MPI_CHAR, 0, 0, MPI_COMM_WORLD); } // Here we collect the GPU IDs from all MPI processes and print a warning if one GPU is assigned to more than one process. 
char *StrBuf; StrBuf = (char*)malloc(NumProcs*etics_str_len*sizeof(char)); MPI_Gather( UniqueDeviceID, etics_str_len, MPI_CHAR, StrBuf, etics_str_len, MPI_CHAR, 0, MPI_COMM_WORLD); if (MyRank == 0) { bool DuplicateFound = false; for (int i = 0; i < NumProcs; i++) { for (int j = i+1; j < NumProcs; j++) { if (strcmp(StrBuf+i*etics_str_len, StrBuf+j*etics_str_len) == 0) { DuplicateFound = true; break; } if (DuplicateFound) break; } } if (DuplicateFound) { printf("\x1B[31m!!SEVERE WARNING!!\x1B[0m It seems the same physical GPU device was assigned to multiple processes; check the submission script.\n"); } } free(StrBuf); // Now initiate the code method::Init(N, 0, 0, 0, 0); // Initiate the integrator Integrator IntegratorObj(LocalList, N); // More initializations. Real NextOutput = 0, NextSnapshot = 0; T = FileTime; int SnapNumber = FileSnapshotNum; Step = CalculateStepSize(); while (T <= Tcrit) { if (T >= NextOutput) { DisplayInformation(IntegratorObj); NextOutput += dT1; } if (T >= NextSnapshot) { int CurrentTotalN; PrepareSnapshot(IntegratorObj, &FullList, &CurrentTotalN); if (MyRank==0) { if (Params.OutputFormat == "ascii") WriteSnapshotASCII(Params.Prefix, SnapNumber, FullList, CurrentTotalN, T); #ifdef ETICS_HDF5 else if (Params.OutputFormat == "hdf5") WriteSnapshotHDF5(Params.Prefix, SnapNumber, FullList, CurrentTotalN, T); #endif else {cerr << "Error" << endl; exit(1);} free(FullList); } SnapNumber++; NextSnapshot += dT2; } // Take the drift step. IntegratorObj.DriftStep(Step); // Calculate the forces in the new positions. IntegratorObj.CalculateGravity(); // Finish by taking the kick step. // The kick functor also "commits" the predicted forces into the "acc" member. IntegratorObj.KickStep(Step); // N particles were implicitly propagated in this iteration. NSteps += 1; // Advance global time. T += Step; // Calculate the next step. Step = CalculateStepSize(); } IntegratorObj.~Integrator(); MPI_Finalize(); return 0; }
the_stack
/*
 * Host-side tests for the kat on-device builtin wrappers. For each builtin under
 * test, a small "hook" struct is generated holding a device-function pointer and
 * a printable name; a generic kernel later invokes the hooked function on arrays
 * of test inputs.
 */
#include "common.cuh"
#include "util/woodruff_int128_t.hpp"
#include "util/woodruff_uint128_t.hpp"
#include "util/cpu_builtin_equivalents.hpp"
#include <kat/on_device/builtins.cuh>
#include <kat/on_device/non-builtins.cuh>
#include <kat/on_device/collaboration/block.cuh>

using std::size_t;

// Pre-C++17 fallback: use std::experimental::optional.
#if __cplusplus < 201701L
#include <experimental/optional>
template <typename T>
using optional = std::experimental::optional<T>;
#else
// Fixed: the #include was previously placed between "template <typename T>" and
// the alias it introduces, splitting a declaration with a preprocessor directive.
#include <optional>
template <typename T>
using optional = std::optional<T>;
#endif

// An empty optional<T> signals "compare results exactly, no tolerance".
template <typename T>
const auto make_exact_comparison { optional<T>{} };

namespace device_function_ptrs {

#define PREPEND_TYPENAME_IDENTIFIER(t) , typename t

// Declares a hook struct templated on t1 whose ::ptr holds the address of the
// corresponding kat:: device function instantiated with t2, and whose ::name is
// the stringized function identifier.
#define PREPARE_BUILTIN_INNER(subnamespace, builtin_function_basename, t1, t2) \
template <t1> \
struct builtin_function_basename { \
	static const void* const ptr; \
	static const char* const name; \
}; \
\
template <t1> \
const void* const builtin_function_basename<t2>::ptr { (void *) kat::subnamespace::builtin_function_basename<t2> }; \
template <t1> \
const char* const builtin_function_basename<t2>::name { STRINGIZE(kat::subnamespace::builtin_function_basename) }

#define COMMA ,

// Non-template variant of the hook struct.
#define PREPARE_BUILTIN0(subnamespace, builtin_function_basename) \
struct builtin_function_basename { \
	static const void* const ptr; \
	static const char* const name; \
}; \
\
const void* const builtin_function_basename::ptr { (void *) kat::subnamespace::builtin_function_basename }; \
const char* const builtin_function_basename::name { STRINGIZE(kat::subnamespace::builtin_function_basename) }

#define PREPARE_BUILTIN1(subnamespace, builtin_function_basename) PREPARE_BUILTIN_INNER(subnamespace, builtin_function_basename, typename T, T)
#define PREPARE_BUILTIN2(subnamespace, builtin_function_basename) PREPARE_BUILTIN_INNER(subnamespace, builtin_function_basename, typename T1 COMMA typename T2, T1 COMMA T2)
#define PREPARE_BUILTIN3(subnamespace, builtin_function_basename) PREPARE_BUILTIN_INNER(subnamespace, builtin_function_basename, typename T1 COMMA typename T2 COMMA typename T3, T1 COMMA T2 COMMA T3)

#define INSTANTIATE_BUILTIN_VIA_PTR(builtin_function_basename, ...) \
template struct builtin_function_basename<__VA_ARGS__>

PREPARE_BUILTIN1(builtins, multiplication_high_bits);
PREPARE_BUILTIN1(builtins, divide);
PREPARE_BUILTIN1(builtins, absolute_value);
PREPARE_BUILTIN1(builtins, minimum);
PREPARE_BUILTIN1(builtins, maximum);
PREPARE_BUILTIN1(builtins, sum_with_absolute_difference);
PREPARE_BUILTIN1(builtins, population_count);
PREPARE_BUILTIN1(builtins, bit_reverse);
PREPARE_BUILTIN1(builtins, find_leading_non_sign_bit);
PREPARE_BUILTIN1(builtins::bit_field, extract_bits);
PREPARE_BUILTIN1(builtins::bit_field, replace_bits);
PREPARE_BUILTIN0(builtins, permute_bytes);

// This function is special, in that one of its template parameters is a value rather than a type.
PREPARE_BUILTIN_INNER(builtins, funnel_shift_right, kat::builtins::funnel_shift_amount_resolution_mode_t AmountResolutionMode, AmountResolutionMode);
PREPARE_BUILTIN_INNER(builtins, funnel_shift_left, kat::builtins::funnel_shift_amount_resolution_mode_t AmountResolutionMode, AmountResolutionMode);
// PREPARE_BUILTIN0(builtins, funnel_shift);

PREPARE_BUILTIN1(builtins, average);
PREPARE_BUILTIN1(builtins, average_rounded_up);

PREPARE_BUILTIN0(builtins::special_registers, lane_index);
PREPARE_BUILTIN0(builtins::special_registers, symmetric_multiprocessor_index);
PREPARE_BUILTIN0(builtins::special_registers, grid_index);
PREPARE_BUILTIN0(builtins::special_registers, dynamic_shared_memory_size);
PREPARE_BUILTIN0(builtins::special_registers, total_shared_memory_size);

#if (__CUDACC_VER_MAJOR__ >= 9)
// Note: These three ballot functions were available before CUDA 9, but for
// now we're only testing the CUDA 9 versions.
PREPARE_BUILTIN0(builtins::warp, ballot);
PREPARE_BUILTIN0(builtins::warp, all_lanes_satisfy);
PREPARE_BUILTIN0(builtins::warp, any_lanes_satisfy);
#if ! defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700
// These match/agreement primitives require SM70+.
PREPARE_BUILTIN0(builtins::warp, all_lanes_agree);
PREPARE_BUILTIN1(builtins::warp, propagate_mask_if_lanes_agree);
PREPARE_BUILTIN1(builtins::warp, propagate_mask_if_warp_agrees);
PREPARE_BUILTIN1(builtins::warp, get_matching_lanes);
#endif
#endif

PREPARE_BUILTIN0(builtins::warp::mask_of_lanes, preceding);
PREPARE_BUILTIN0(builtins::warp::mask_of_lanes, preceding_and_self);
PREPARE_BUILTIN0(builtins::warp::mask_of_lanes, self);
PREPARE_BUILTIN0(builtins::warp::mask_of_lanes, succeeding_and_self);
PREPARE_BUILTIN0(builtins::warp::mask_of_lanes, succeeding);

PREPARE_BUILTIN1(non_builtins, find_first_set);
PREPARE_BUILTIN1(non_builtins, count_trailing_zeros);
PREPARE_BUILTIN1(non_builtins, count_leading_zeros);

} // namespace device_function_ptrs

namespace kernels {

// Generic driver kernel: one thread per check. Looks up the device function via
// DeviceFunctionHook::ptr, applies it to this check's inputs, records the result
// and marks the check as executed. The hooked function is expected to be free of
// runtime errors and must not use shared memory.
template <typename DeviceFunctionHook, typename R, typename... Is>
__global__ void execute_testcases(
	size_t                   num_checks,
	fake_bool* __restrict__  execution_complete,
	R*         __restrict__  results,
	const Is*  __restrict__ ... inputs
	)
{
	auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
	auto check_index = global_thread_index;
	if (check_index >= num_checks) { return; }

	using device_function_type = auto (Is...) -> R;
	auto f = (device_function_type*) DeviceFunctionHook::ptr;
	results[check_index] = f(inputs[check_index]...);
	// printf("Thread %3u = (%2u,%2u), result %x, value %d, mask %x, sizeof...(inputs) = %u\n",
	// (unsigned) i, (unsigned) i / 32 , (unsigned) i % 32, results[i], inputs[i]...
// NOTE(review): this region is whitespace-mangled — original line breaks were
// collapsed, so some '//' comments swallow the remainder of a stored line and a
// few continuation fragments are stranded. Code left byte-identical; only
// comment lines are added at stored-line boundaries.
, (unsigned) sizeof...(inputs)); execution_complete[check_index] = true; } } // namespace kernels namespace detail { template <typename T> struct multiplication_result_helper { }; template <> struct multiplication_result_helper<unsigned>{ static_assert(sizeof(unsigned) == 4, "Unexpected size"); static_assert(sizeof(unsigned long long) == 8, "Unexpected size"); using type = unsigned long long; }; template <> struct multiplication_result_helper<long long>{ static_assert(sizeof(long long) == 8, "Unexpected size"); #ifdef __SIZEOF_INT128__ using type = __int128_t; #else #warning "Untrustworthy 128-bit int implementation." using type = int128_t; #endif }; template <> struct multiplication_result_helper<unsigned long long>{ static_assert(sizeof(unsigned long long) == 8, "Unexpected size"); #ifdef __SIZEOF_INT128__ using type = __uint128_t; #else #warning "Untrustworthy 128-bit uint implementation." using type = uint128_t; #endif }; } // namespace detail template <typename T> using multiplication_result_t = typename detail::multiplication_result_helper<T>::type; template <typename I> I multiplication_high_bits(I lhs, I rhs) { multiplication_result_t<I> lhs_ { lhs }, rhs_ { rhs }; auto m = lhs_ * rhs_; auto high_bits = m >> ((unsigned) size_in_bits<I>()); // std::cout << std::hex << lhs << " * " << rhs_ << " = " << m << std::dec // << "; I has " << size_in_bits<I>() << " and m has " << size_in_bits(m) << ". 
high bits: " << high_bits << " after cast " << I(high_bits) << "\n"; return I(high_bits); } template <typename T> struct empty { }; template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } bool check_execution_indicators( size_t num_checks, const char* testcase_name, fake_bool* execution_indicators) { std::stringstream ss; auto index_width = set_width_for_up_to(num_checks); bool all_executed { true }; // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_checks; i++) { ss.str(""); ss << "Failed executing testcase " << (i+1) << " for " << testcase_name; auto failure_message { ss.str() }; CHECK_MESSAGE(execution_indicators[i], failure_message); all_executed = all_executed and execution_indicators[i]; } return all_executed; } namespace detail { template <typename T> T tolerance_gadget(std::true_type, optional<T> x) { return x.value(); } template <typename T> int tolerance_gadget(std::false_type, optional<T>) { return 0; } } // namespace detail template <typename T> std::conditional_t<std::is_arithmetic<T>::value, T, int> tolerance_gadget(optional<T> x) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x); } // TODO: Take iterator templates rather than pointers template <typename R, typename F, typename... Is> void check_results( size_t num_checks, const char* testcase_name, // perhaps add another parameter for specific individual-check details? const R* __restrict__ actual_results, F expected_result_retriever, optional<R> comparison_tolerance_fraction, const Is* __restrict__... inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_checks); // TODO: Consider using the maximum/minimum result values to set field widths. 
// check_results: compares each actual result against the expected-value
// retriever, using doctest::Approx with the given tolerance when one is set,
// and an exact comparison otherwise.
for(size_t i = 0; i < num_checks; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << testcase_name // << " :\n" << "(" << std::make_tuple(inputs[i]...) << ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { auto tolerance = tolerance_gadget(comparison_tolerance_fraction); // With C++17, we could just use if constexpr and never try to compare against // a non-arithmetic type CHECK_MESSAGE(actual_results[i] == doctest::Approx(expected_result_retriever(i)).epsilon(tolerance), mismatch_message); } else { CHECK_MESSAGE(actual_results[i] == expected_result_retriever(i), mismatch_message); } } } template <typename T> struct tag { }; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename K, typename R, typename... Is, size_t... Indices> auto execute_testcase_on_gpu( tag<R>, std::index_sequence<Indices...>, K testcase_kernel, const char* testcase_name, cuda::launch_configuration_t launch_config, size_t num_checks, Is* __restrict__ ... 
inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<R[]>(device, num_checks) }; cuda::memory::device::zero(device_side_results.get(), num_checks * sizeof(R)); // just to be on the safe side auto device_side_execution_indicators { cuda::memory::device::make_unique<fake_bool[]>(device, num_checks * sizeof(fake_bool)) }; cuda::memory::device::zero(device_side_execution_indicators.get(), num_checks * sizeof(fake_bool)); // this is actually necessary auto host_side_results { std::vector<R>(num_checks) }; auto host_side_execution_indicators { std::vector<fake_bool>(num_checks) }; auto make_device_side_input = [&device, num_checks](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_checks * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_checks)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, num_checks, device_side_execution_indicators.get(), device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(R) * num_checks); cuda::memory::copy(host_side_execution_indicators.data(), device_side_execution_indicators.get(), sizeof(bool) * num_checks); check_execution_indicators(num_checks, testcase_name, host_side_execution_indicators.data()); return host_side_results; } template <typename K, typename R, typename... Is, size_t... 
Indices> void execute_testcase_on_gpu_and_check( std::index_sequence<Indices...> is, const R* __restrict__ expected_results, K testcase_kernel, const char* testcase_name, cuda::launch_configuration_t launch_config, size_t num_checks, optional<R> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto host_side_results = execute_testcase_on_gpu( tag<R>{}, is, testcase_kernel, testcase_name, launch_config, num_checks, inputs...); auto expected_result_retriever = [&](size_t pos) { return expected_results[pos]; }; check_results ( num_checks, testcase_name, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_result_retriever, comparison_tolerance_fraction, inputs...); } template <typename DeviceFunctionHook, typename R, typename... Is> void execute_uniform_builtin_testcase_on_gpu_and_check( DeviceFunctionHook dfh, const R* __restrict__ expected_results, size_t num_checks, optional<R> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto block_size { 128 }; auto num_grid_blocks { div_rounding_up(num_checks, block_size) }; auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) }; auto host_side_results = execute_testcase_on_gpu( tag<R>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcases<DeviceFunctionHook, R, Is...>, DeviceFunctionHook::name, launch_config, num_checks, inputs... ); auto expected_result_retriever = [&](size_t pos) { return expected_results[pos]; }; check_results ( num_checks, DeviceFunctionHook::name, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_result_retriever, comparison_tolerance_fraction, inputs...); } template <typename DeviceFunctionHook, typename R, typename... 
Is> void execute_non_uniform_builtin_testcase_on_gpu_and_check( DeviceFunctionHook dfh, const R* __restrict__ expected_results, size_t num_checks, cuda::grid::dimension_t num_grid_blocks, cuda::grid::block_dimension_t block_size, optional<R> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) }; // TODO: Should we check that num_checks is equal to the number of grid threads? auto host_side_results = execute_testcase_on_gpu( tag<R>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcases<DeviceFunctionHook, R, Is...>, DeviceFunctionHook::name, launch_config, num_checks, inputs... ); auto expected_result_retriever = [&](size_t pos) { return expected_results[pos]; }; check_results ( num_checks, DeviceFunctionHook::name, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_result_retriever, comparison_tolerance_fraction, inputs...); } // Builtins whose behavior is uniform across all grid threads, and does not depend on data held by other threads TEST_SUITE("uniform builtins") { // Note: Types for instantiation are chosen based on what's actually available in CUDA TEST_CASE_TEMPLATE("multiplication high bits", I, unsigned, long long, unsigned long long) { using result_type = I; std::vector<result_type> expected_results; std::vector<I> lhs; std::vector<I> rhs; auto add_check = [&](I x, I y) { lhs.emplace_back(x); rhs.emplace_back(y); // std::cout << "testcase " << expected_results.size() + 1 << ": "; auto result = multiplication_high_bits(x, y); expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<I>::max(); constexpr const auto min = std::numeric_limits<I>::min(); constexpr const auto half_num_bits = size_in_bits<I>() / 2; constexpr const auto almost_sqrt = (I{1} << half_num_bits) - 1; constexpr const auto mid_bit_on = I{1} << half_num_bits; // Yields 0 add_check(0, 0); 
// Each TEST_CASE_TEMPLATE below builds host-side expected values with a CPU
// reference, then runs the hooked device builtin over the same inputs.
add_check(1, 0); add_check(0, 1); add_check(1, 1); add_check(almost_sqrt, almost_sqrt); // Yields 1 add_check(mid_bit_on, mid_bit_on); // Yields 6 add_check(mid_bit_on * 2, mid_bit_on * 3); add_check(mid_bit_on * 3, mid_bit_on * 2); // Depends... add_check(min, min); add_check(min, max); add_check(max, min); add_check(max, max); auto num_checks = expected_results.size(); // std::cout << "function is at " << (void *)(kat::builtins::multiplication_high_bits<I>) << std::endl; execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::multiplication_high_bits<I>{}, // kat::builtins::multiplication_high_bits<I>, expected_results.data(), num_checks, make_exact_comparison<result_type>, lhs.data(), rhs.data()); } TEST_CASE_TEMPLATE("minimum", T, int, unsigned int, long, unsigned long, long long, unsigned long long, float , double) { using result_type = T; std::vector<result_type> expected_results; std::vector<T> lhs; std::vector<T> rhs; auto add_check = [&](T x, T y) { lhs.emplace_back(x); rhs.emplace_back(y); auto result = std::min<T>(x, y); // Note: This is not a trivial choice! The behavior in edge cases, // like or near-equality for floating-point types, is not the same // among any two implementations of a "minimum()" function. 
expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<T>::max(); constexpr const auto min = std::numeric_limits<T>::min(); constexpr const auto half_num_bits = size_in_bits<T>() / 2; constexpr const auto half_num_bits_max_bits = (T{ uint64_t{1} << half_num_bits}) - 1; constexpr const auto mid_bit_on = T{uint64_t{1} << half_num_bits}; // Note that for floating-point types, bit-counting is not that meaningful add_check(0, 0); add_check(1, 0); add_check(0, 1); add_check(1, 1); add_check(half_num_bits_max_bits, half_num_bits_max_bits); add_check(mid_bit_on, mid_bit_on); add_check(mid_bit_on * 2, mid_bit_on * 3); add_check(mid_bit_on * 3, mid_bit_on * 2); add_check(min, min); add_check(min, max); add_check(max, min); add_check(max, max); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::minimum<T>{}, expected_results.data(), num_checks, make_exact_comparison<result_type>, lhs.data(), rhs.data()); } TEST_CASE_TEMPLATE("maximum", T, int, unsigned int, long, unsigned long, long long, unsigned long long, float, double) { std::vector<T> expected_results; std::vector<T> lhs; std::vector<T> rhs; auto add_check = [&](T x, T y) { lhs.emplace_back(x); rhs.emplace_back(y); auto result = std::max<T>(x, y); // Note: This is not a trivial choice! The behavior in edge cases, // like or near-equality for floating-point types, is not the same // among any two implementations of a "minimum()" function. 
expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<T>::max(); constexpr const auto min = std::numeric_limits<T>::min(); constexpr const auto half_num_bits = size_in_bits<T>() / 2; constexpr const auto half_num_bits_max_bits = (T{ uint64_t{1} << half_num_bits}) - 1; constexpr const auto mid_bit_on = T{uint64_t{1} << half_num_bits}; // Note that for floating-point types, bit-counting is not that meaningful add_check(0, 0); add_check(1, 0); add_check(0, 1); add_check(1, 1); add_check(half_num_bits_max_bits, half_num_bits_max_bits); add_check(mid_bit_on, mid_bit_on); add_check(mid_bit_on * 2, mid_bit_on * 3); add_check(mid_bit_on * 3, mid_bit_on * 2); add_check(min, min); add_check(min, max); add_check(max, min); add_check(max, max); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::maximum<T>{}, expected_results.data(), num_checks, make_exact_comparison<T>, lhs.data(), rhs.data()); } TEST_CASE_TEMPLATE("absolute_value", T, int, long, long long, float, double, unsigned char, unsigned short, unsigned, unsigned long, unsigned long long) { std::vector<T> expected_results; std::vector<T> values; auto add_check = [&](T x) { values.emplace_back(x); auto result = absolute_value(x); expected_results.emplace_back(result); }; add_check(0); add_check(1); add_check(10); add_check(T(-1)); add_check(T(-10)); add_check(std::numeric_limits<T>::max()); add_check(std::numeric_limits<T>::min()); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::absolute_value<T>{}, expected_results.data(), num_checks, make_exact_comparison<T>, values.data()); } TEST_CASE_TEMPLATE("divide", T, float, double) { std::vector<T> expected_results; std::vector<T> dividends; std::vector<T> divisors; auto add_check = [&](T x, T y) { dividends.emplace_back(x); divisors.emplace_back(y); auto result = x / y; // Note: This is not a trivial choice - 
it depends on the exact floating-point // implementation on the CPU; and rounding choices... expected_results.emplace_back(result); }; // constexpr const auto max = std::numeric_limits<T>::max(); // constexpr const auto min = std::numeric_limits<T>::min(); constexpr const auto infinity = std::numeric_limits<T>::infinity(); // Should yield 0 add_check(0, 1); add_check(0, 2); add_check(0, infinity); // This fails: We get nan's but should get 0 // add_check(0, max); // add_check(0, min); add_check(T{0.5696892130}, T{0.0300253556}); add_check(T{0.8300151169975111343}, T{0.99338683191717680375}); // TODO: More testcases wouldn't hurt auto num_checks = expected_results.size(); optional<T> comparison_tolerance_fraction { 1e-6 }; execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::divide<T>{}, expected_results.data(), num_checks, comparison_tolerance_fraction, dividends.data(), divisors.data()); } TEST_CASE_TEMPLATE("sum_with_absolute_difference", I, int16_t, int32_t ,int64_t, uint16_t, uint32_t, uint64_t) { using uint_t = std::make_unsigned_t<I>; using result_type = uint_t; std::vector<result_type> expected_results; std::vector<uint_t> addends; std::vector<I> x_values; std::vector<I> y_values; auto add_check = [&](I x, I y, uint_t addend) { x_values.emplace_back(x); y_values.emplace_back(y); addends.emplace_back(addend); // std::cout << "Testcase " << (x_values.size()+1) << " : absolute_difference(x, y) = " << absolute_difference(x, y) // << ", addend + I(absolute_difference(x, y)) = " << addend + I(absolute_difference(x, y)) << '\n'; auto result = addend + I(absolute_difference(x, y)); // The non-trivial choice here - conversion from the difference type to unsigned expected_results.emplace_back(result); }; constexpr const auto max_uint = std::numeric_limits<uint_t>::max(); constexpr const auto max = std::numeric_limits<I>::max(); constexpr const auto min = std::numeric_limits<I>::min(); constexpr const auto half_num_bits = size_in_bits<I>() / 2; // 
// Remaining cases: sum_with_absolute_difference edge values, then
// population_count / bit_reverse / find_leading_non_sign_bit sweeps.
constexpr const auto half_num_bits_max_bits = (I{1} << half_num_bits) - 1; constexpr const auto mid_bit_on = I{1} << half_num_bits; // Should yield 0 // ... but be careful - if you try to check some of these values in mid-flight // you might get stung by integer promotion. I know I have :-( add_check(I(0), I(0), 0); add_check(I(1), I(1), 0); add_check(I(min), I(min), 0); add_check(I(mid_bit_on), I(mid_bit_on), 0); add_check(I(1), I(0), max_uint); add_check(I(0), I(1), max_uint); // Should yield 1 << 15 int16_t, 1 << 16 for uint16_t, 0 otherwise add_check(I(max), I(0), 1); // Should yield 1 for unsigned, 0 for 32-bit types, maybe also for 64-bit types add_check(I(0), I(min), 1); // Should yield: // 1 << 16 for int16_t and uint16_t (I think) // 0 for int32_t and uint32_t // 0 for int64_t and uint64_t add_check(I(max), I(min), 1); // Should yield 123 add_check(I(max), I(max), 123); add_check(I(min), I(min), 123); add_check(I(mid_bit_on), I(mid_bit_on), 123); // Should yield 2 * mid_bit_on + 1 for all add_check(I(mid_bit_on), I(-mid_bit_on), 1); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::sum_with_absolute_difference<I>{}, expected_results.data(), num_checks, make_exact_comparison<result_type>, x_values.data(), y_values.data(), addends.data() ); } TEST_CASE_TEMPLATE("population_count", I, uint8_t, uint16_t, uint32_t, uint64_t) { using result_type = int; std::vector<result_type> expected_results; std::vector<I> values; auto add_check = [&](I x) { values.emplace_back(x); auto result = population_count<I>(x); expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<I>::max(); constexpr const auto half_num_bits = size_in_bits<I>() / 2; constexpr const auto mid_bit_on = I{1} << half_num_bits; add_check(0); add_check(1); add_check(2); add_check(3); add_check(4); add_check(8); add_check(16); add_check(31); add_check(32); add_check(33); add_check(max); add_check(mid_bit_on - 
1); add_check(mid_bit_on); add_check(mid_bit_on + 1); add_check(max - 1); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::population_count<I>{}, expected_results.data(), num_checks, make_exact_comparison<result_type>, values.data()); } TEST_CASE_TEMPLATE("bit_reverse", I, uint32_t, uint64_t, unsigned long) { using result_type = I; std::vector<result_type> expected_results; std::vector<I> values; auto add_check = [&](I x) { values.emplace_back(x); auto result = bit_reverse<I>(x); expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<I>::max(); constexpr const auto half_num_bits = size_in_bits<I>() / 2; constexpr const auto mid_bit_on = I{1} << half_num_bits; add_check(0); add_check(0b1); add_check(0b10); add_check(0b11); add_check(0b101); add_check(mid_bit_on - 1); add_check(mid_bit_on); add_check(mid_bit_on + 1); add_check(~ (mid_bit_on - 1)); add_check(~ (mid_bit_on)); add_check(~ (mid_bit_on + 1)); add_check(max - 1); add_check(max); auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::bit_reverse<I>{}, expected_results.data(), num_checks, make_exact_comparison<result_type>, values.data()); } TEST_CASE_TEMPLATE("find_leading_non_sign_bit", I, int, unsigned, long, unsigned long, long long, unsigned long long) { using result_type = uint32_t; std::vector<result_type> expected_results; std::vector<I> values; auto add_check = [&](I x, result_type result) { values.emplace_back(x); expected_results.emplace_back(result); }; constexpr const auto max = std::numeric_limits<I>::max(); constexpr const auto min = std::numeric_limits<I>::min(); constexpr const auto half_num_bits = size_in_bits<I>() / 2; constexpr const auto mid_bit_index = half_num_bits - 1; constexpr const auto mid_bit_on = I{1} << half_num_bits; constexpr const auto no_nonsign_bits { std::numeric_limits<uint32_t>::max() }; constexpr const auto 
msb_index { size_in_bits<I>() - 1 }; add_check(0, no_nonsign_bits); add_check(0b1, 0); add_check(0b10, 1); add_check(0b11, 1); add_check(0b101, 2); add_check(mid_bit_on - 1, half_num_bits - 1); add_check(mid_bit_on, half_num_bits); add_check(mid_bit_on + 1, half_num_bits); if (std::is_unsigned<I>::value) { add_check(I(~ (mid_bit_on - 1)), msb_index); add_check(I(~ (mid_bit_on)), msb_index); add_check(I(~ (mid_bit_on + 1)), msb_index); add_check(max - 1, msb_index); add_check(max, msb_index); } else { add_check(I(-0b1), no_nonsign_bits); add_check(I(-0b1010), 3); add_check(~ (mid_bit_on - 1), mid_bit_index); add_check(~ (mid_bit_on), mid_bit_index + 1); add_check(~ (mid_bit_on + 1), mid_bit_index + 1); add_check(max - 1, msb_index - 1); add_check(max, msb_index - 1); add_check(min, msb_index - 1); add_check(min + 1, msb_index - 1); add_check(min + 2, msb_index - 1); } auto num_checks = expected_results.size(); execute_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::find_leading_non_sign_bit<I>{}, expected_results.data(), num_checks, make_exact_comparison<result_type>, values.data()); } // Not testing ldg/load_global_with_non_coherent_cache here, since such a test is too dissimilar from the // rest of the builtin tests. 
// Exercises the byte-permutation builtin (PTX prmt-style): each selector nibble picks
// one of the 8 bytes of the (low_word, high_word) pair; OR-ing in replicate_sign (0b1000)
// makes the output byte a replication of the selected byte's sign (top) bit.
TEST_CASE("select_bytes")
{
	using result_type = uint32_t;
	std::vector<result_type> expected_results;
	std::vector<uint32_t> low_words;
	std::vector<uint32_t> high_words;
	std::vector<uint32_t> selectors_words;
	auto add_check = [&](uint32_t x, uint32_t y, uint32_t selectors, uint32_t result) {
		low_words.emplace_back(x);
		high_words.emplace_back(y);
		selectors_words.emplace_back(selectors);
		expected_results.emplace_back(result);
	};
	// Packs four 4-bit byte selectors into one selector word, one nibble per output byte
	auto make_selector = [](
		unsigned first_byte, unsigned second_byte, unsigned third_byte, unsigned fourth_byte)
	{
		auto selectors_are_valid = (first_byte <= 0xF) and (second_byte <= 0xF) and (third_byte <= 0xF) and (fourth_byte <= 0xF);
		REQUIRE(selectors_are_valid);
		// { throw std::invalid_argument("Invalid byte selectors for PTX prmt"); }
		return first_byte | (second_byte << 4) | (third_byte << 8) | (fourth_byte << 12);
	};
	constexpr const auto replicate_sign { 0b1000 };
	// constexpr const auto copy_value { 0b0000 };
	// Plain byte copies: uniform selections, identity, byte-reversal, cross-word picks
	add_check(0x33221100, 0x77665544, make_selector(0,0,0,0), 0 );
	add_check(0x33221100, 0x77665544, make_selector(1,1,1,1), 0x11111111 );
	add_check(0x33221100, 0x77665544, make_selector(2,2,2,2), 0x22222222 );
	add_check(0x33221100, 0x77665544, make_selector(3,3,3,3), 0x33333333 );
	add_check(0x33221100, 0x77665544, make_selector(4,4,4,4), 0x44444444 );
	add_check(0x33221100, 0x77665544, make_selector(5,5,5,5), 0x55555555 );
	add_check(0x33221100, 0x77665544, make_selector(6,6,6,6), 0x66666666 );
	add_check(0x33221100, 0x77665544, make_selector(0,1,2,3), 0x33221100 );
	add_check(0x33221100, 0x77665544, make_selector(3,2,1,0), 0x00112233 );
	add_check(0x33221100, 0x77665544, make_selector(7,6,5,4), 0x44556677 );
	add_check(0x33221100, 0x77665544, make_selector(2,3,4,5), 0x55443322 );
	// Sign-replication cases: selected byte's top bit (0xA0 => set) fills the output byte
	add_check(0x00000000, 0x00000000, make_selector(0 | replicate_sign,0 | replicate_sign,0 | replicate_sign,0 | replicate_sign), 0x0 );
	add_check(0xA0A0A000, 0xA0A0A0A0, make_selector(1 | replicate_sign,0, 0, 0), 0x000000FF );
	add_check(0xA0A0A0A0, 0xA0A0A0A0,
		make_selector(0 | replicate_sign,0 | replicate_sign,0 | replicate_sign,0 | replicate_sign), 0xFFFFFFFF );
	add_check(0xA0A0A0A0, 0xA0A0A0A0, make_selector(1 | replicate_sign,2 | replicate_sign,3 | replicate_sign,4 | replicate_sign), 0xFFFFFFFF );
	add_check(0x11111111, 0x11111111, make_selector(6, 7 | replicate_sign,1 | replicate_sign, 1 | replicate_sign), 0x00000011 );
	add_check(0x33221100, 0x77665544, make_selector(7,6 | replicate_sign,5 | replicate_sign,4), 0x44000077 );
	add_check(0x33221100, 0x77665544, make_selector(1,1 | replicate_sign,2,2 | replicate_sign), 0x00220011 );
	add_check(0x33F2F100, 0x77665544, make_selector(1,1 | replicate_sign,2,2 | replicate_sign), 0xFFF2FFF1 );
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::permute_bytes{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		low_words.data(),
		high_words.data(),
		selectors_words.data()
	);
}

// Checks the clz builtin; expected values are stated explicitly. By convention here,
// clz(0) == the full bit width of I.
TEST_CASE_TEMPLATE("count_leading_zeros", I, int32_t, uint32_t, int64_t, uint64_t)
{
	using result_type = int32_t;
	std::vector<result_type> expected_results;
	std::vector<I> values;
	auto add_check = [&](I x, result_type result) {
		values.emplace_back(x);
		expected_results.emplace_back(result);
	};
	constexpr const auto max = std::numeric_limits<I>::max();
	constexpr const auto min = std::numeric_limits<I>::min();
	constexpr const auto num_all_bits = size_in_bits<I>();
	constexpr const auto half_num_bits = size_in_bits<I>() / 2;
	// constexpr const auto mid_bit_index = half_num_bits - 1;
	constexpr const auto mid_bit_on = I{1} << half_num_bits;
	// constexpr const auto msb_index { size_in_bits<I>() - 1 };
	add_check(0, num_all_bits);
	add_check(0b1, num_all_bits - 1);
	add_check(0b10, num_all_bits - 2);
	add_check(0b11, num_all_bits - 2);
	add_check(0b101, num_all_bits - 3);
	add_check(mid_bit_on - 1, half_num_bits);
	add_check(mid_bit_on, half_num_bits - 1);
	add_check(mid_bit_on + 1, half_num_bits - 1);
	add_check(~ (mid_bit_on - 1), 0);
	add_check(~ (mid_bit_on), 0);
	add_check(~ (mid_bit_on + 1), 0);
	if (std::is_unsigned<I>::value) {
		add_check(max - 1, 0);
		add_check(max, 0);
	}
	else {
		// Negative values have their sign (top) bit set, hence zero leading zeros
		add_check(I(-1), 0);
		add_check(I(-20), 0);
		add_check(max - 1, 1);
		add_check(max, 1);
		add_check(min, 0);
		add_check(min + 1, 0);
	}
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::count_leading_zeros<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		values.data());
}

// Checks the (rounded-down) average builtin against a host reference computed in 64-bit
// arithmetic, so that (x + y) cannot overflow I for the values used here.
TEST_CASE_TEMPLATE("average", I, int, unsigned)
{
	using result_type = I;
	std::vector<result_type> expected_results;
	std::vector<I> lhs;
	std::vector<I> rhs;
	auto add_check = [&](I x, I y) {
		lhs.emplace_back(x);
		rhs.emplace_back(y);
		// Widen to 64 bits before summing to avoid overflow in the reference computation
		I result ( (int64_t{x} + int64_t{y}) / 2 );
		expected_results.emplace_back(result);
	};
	constexpr const auto max = std::numeric_limits<I>::max();
	constexpr const auto half_num_bits = size_in_bits<I>() / 2;
	constexpr const auto mid_bit_on = I{1} << half_num_bits;
	add_check(0, 0); add_check(1, 0); add_check(1, 0); add_check(1, 1);
	add_check(mid_bit_on - 1, mid_bit_on - 1);
	add_check(mid_bit_on, mid_bit_on - 1);
	add_check(mid_bit_on - 1, mid_bit_on);
	add_check(mid_bit_on, mid_bit_on);
	add_check(mid_bit_on, mid_bit_on + 1);
	add_check(mid_bit_on + 1, mid_bit_on);
	add_check(mid_bit_on + 1, mid_bit_on + 1);
	add_check(max - 1, max - 1);
	add_check(max, max - 1);
	add_check(max - 1, max);
	add_check(max, max);
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::average<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		lhs.data(),
		rhs.data());
}

// Same as "average", but with round-half-up semantics: (x + y + 1) / 2 in 64-bit arithmetic.
TEST_CASE_TEMPLATE("average_rounded_up", I, int, unsigned)
{
	using result_type = I;
	std::vector<result_type> expected_results;
	std::vector<I> lhs;
	std::vector<I> rhs;
	auto add_check = [&](I x, I y) {
		lhs.emplace_back(x);
		rhs.emplace_back(y);
		I result ( ((int64_t{x} + int64_t{y}) + 1) / 2 );
		expected_results.emplace_back(result);
	};
	constexpr const auto max = std::numeric_limits<I>::max();
	constexpr const auto half_num_bits = size_in_bits<I>() / 2;
	constexpr const auto mid_bit_on = I{1} << half_num_bits;
	add_check(0, 0); add_check(1, 0); add_check(1, 0); add_check(1, 1);
	add_check(mid_bit_on - 1, mid_bit_on - 1);
	add_check(mid_bit_on, mid_bit_on - 1);
	add_check(mid_bit_on - 1, mid_bit_on);
	add_check(mid_bit_on, mid_bit_on);
	add_check(mid_bit_on, mid_bit_on + 1);
	add_check(mid_bit_on + 1, mid_bit_on);
	add_check(mid_bit_on + 1, mid_bit_on + 1);
	add_check(max - 1, max - 1);
	add_check(max, max - 1);
	add_check(max - 1, max);
	add_check(max, max);
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::average_rounded_up<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		lhs.data(),
		rhs.data());
}

} // TEST_SUITE("uniform builtins")

TEST_SUITE("uniform non-builtins") {

// Checks trailing-zero counting; by convention here, ctz(0) == the full bit width of I.
TEST_CASE_TEMPLATE("count_trailing_zeros", I, int, unsigned, long, unsigned long, long long, unsigned long long)
{
	using result_type = int32_t;
	std::vector<result_type> expected_results;
	std::vector<I> values;
	auto add_check = [&](I x, result_type result) {
		values.emplace_back(x);
		expected_results.emplace_back(result);
	};
	constexpr const auto max = std::numeric_limits<I>::max();
	constexpr const auto min = std::numeric_limits<I>::min();
	constexpr const auto num_all_bits = size_in_bits<I>();
	constexpr const auto half_num_bits = size_in_bits<I>() / 2;
	// constexpr const auto mid_bit_index = half_num_bits - 1;
	constexpr const auto mid_bit_on = I{1} << half_num_bits;
	// constexpr const auto msb_index { size_in_bits<I>() - 1 };
	add_check(0, size_in_bits<I>());
	add_check(0b1, 0);
	add_check(0b10, 1);
	add_check(0b11, 0);
	add_check(0b101, 0);
	add_check(mid_bit_on - 1, 0);
	add_check(mid_bit_on, half_num_bits);
	add_check(mid_bit_on + 1, 0);
	add_check(~ (mid_bit_on - 1), half_num_bits);
	add_check(~ (mid_bit_on), 0);
	add_check(~ (mid_bit_on + 1), 1);
	if (std::is_unsigned<I>::value) {
		add_check(max - 1, 1);
		add_check(max, 0);
	}
	else {
		add_check(I(-1), 0);
		add_check(I(-20), 2);
		add_check(max - 1, 1);
		add_check(max, 0);
		add_check(min, num_all_bits - 1);
		add_check(min + 1, 0);
	}
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::count_trailing_zeros<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		values.data());
}

// Checks ffs semantics: 1-based index of the lowest set bit, 0 for no set bits.
// The expected values below are written as "ctz-expectation + 1" to make the
// relationship with count_trailing_zeros explicit.
TEST_CASE_TEMPLATE("find_first_set", I, int, unsigned, long, unsigned long, long long, unsigned long long)
{
	using result_type = int32_t;
	std::vector<result_type> expected_results;
	std::vector<I> values;
	auto add_check = [&](I x, result_type result) {
		values.emplace_back(x);
		expected_results.emplace_back(result);
	};
	constexpr const auto max = std::numeric_limits<I>::max();
	constexpr const auto min = std::numeric_limits<I>::min();
	constexpr const auto num_all_bits = size_in_bits<I>();
	constexpr const auto half_num_bits = size_in_bits<I>() / 2;
	// constexpr const auto mid_bit_index = half_num_bits - 1;
	constexpr const auto mid_bit_on = I{1} << half_num_bits;
	// constexpr const auto msb_index { size_in_bits<I>() - 1 };
	add_check(0, -1 + 1);
	add_check(0b1, 0 + 1);
	add_check(0b10, 1 + 1);
	add_check(0b11, 0 + 1);
	add_check(0b101, 0 + 1);
	add_check(mid_bit_on - 1, 0 + 1);
	add_check(mid_bit_on, half_num_bits + 1);
	add_check(mid_bit_on + 1, 0 + 1);
	add_check(~ (mid_bit_on - 1), half_num_bits + 1);
	add_check(~ (mid_bit_on), 0 + 1);
	add_check(~ (mid_bit_on + 1), 1 + 1);
	if (std::is_unsigned<I>::value) {
		add_check(max - 1, 1 + 1);
		add_check(max, 0 + 1);
	}
	else {
		add_check(I(-1), 0 + 1);
		add_check(I(-20), 2 + 1);
		add_check(max - 1, 1 + 1);
		add_check(max, 0 + 1);
		add_check(min, num_all_bits - 1 + 1);
		add_check(min + 1, 0 + 1);
	}
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::find_first_set<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		values.data());
}

// Checks bit-field extraction (bfe-style): num_bits bits starting at start_pos.
// For signed I the extracted field is sign-extended, hence the separate expected
// columns for unsigned and signed instantiations.
TEST_CASE_TEMPLATE("extract_bits", I, int32_t, uint32_t, int64_t, uint64_t) //int, unsigned int, long)// , unsigned long)//, long long, unsigned long long)
{
	using result_type = I;
	using bit_index_type = uint32_t;
	std::vector<result_type> expected_results;
	std::vector<I> bit_fields;
	std::vector<bit_index_type> start_positions;
	std::vector<bit_index_type> numbers_of_bits;
	// Records one input triplet plus both expectations; the one matching I's
	// signedness is the one actually kept
	auto add_check = [&](
		I bits, bit_index_type start_pos, bit_index_type num_bits,
		result_type unsigned_result, std::make_signed_t<result_type> signed_result)
	{
		bit_fields.emplace_back(bits);
		start_positions.emplace_back(start_pos);
		numbers_of_bits.emplace_back(num_bits);
		expected_results.emplace_back( std::is_unsigned<I>::value ? unsigned_result : signed_result);
	};
	// bit start num unsigned signed
	// field pos bits result result
	// -------------------------------------
	add_check(0b0000, 0, 0, 0b0, 0b0);
	add_check(0b0000, 0, 1, 0b0, 0b0);
	add_check(0b0000, 0, 2, 0b00, 0b0);
	add_check(0b0000, 0, 3, 0b000, 0b0);
	add_check(0b0001, 0, 0, 0b0, 0b0);
	add_check(0b0001, 0, 1, 0b1, -0b1);
	add_check(0b0001, 0, 2, 0b01, 0b01);
	add_check(0b0001, 0, 3, 0b001, 0b001);
	add_check(0b0101, 0, 0, 0b0, 0b0);
	add_check(0b0101, 0, 1, 0b1, -0b1);
	add_check(0b0101, 0, 2, 0b01, 0b01);
	add_check(0b0101, 0, 3, 0b101, -0b11);
	add_check(0b0000, 1, 0, 0b0, 0b0);
	add_check(0b0000, 1, 1, 0b0, 0b0);
	add_check(0b0000, 1, 2, 0b00, 0b0);
	add_check(0b0000, 1, 3, 0b000, 0b0);
	add_check(0b0001, 1, 0, 0b0, 0b0);
	add_check(0b0001, 1, 1, 0b0, 0b0);
	add_check(0b0001, 1, 2, 0b00, 0b0);
	add_check(0b0001, 1, 3, 0b000, 0b0);
	add_check(0b0101, 1, 0, 0b0, 0b0);
	add_check(0b0101, 1, 1, 0b0, 0b0);
	add_check(0b0101, 1, 2, 0b10, -0b10);
	add_check(0b0101, 1, 3, 0b010, 0b10);
	if (std::is_signed<I>::value) {
		// TODO: signed testcases
	}
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::extract_bits<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		bit_fields.data(),
		start_positions.data(),
		numbers_of_bits.data()
	);
}

// Checks bit-field insertion (bfi-style): the low num_bits bits of bits_to_insert
// replace the field of original_bit_field starting at start_pos.
TEST_CASE_TEMPLATE("replace_bits", I, uint32_t, uint64_t)
{
	using result_type = I;
	using bit_index_type = uint32_t;
	std::vector<result_type> expected_results;
	std::vector<I> original_bit_fields;
	std::vector<I> bits_to_insert;
	std::vector<bit_index_type> start_positions;
	std::vector<bit_index_type> numbers_of_bits;
	auto add_check = [&](
		I original_bit_field, I bits_to_insert_into_this_field,
		bit_index_type start_pos, bit_index_type num_bits, result_type result)
	{
		original_bit_fields.emplace_back(original_bit_field);
		bits_to_insert.emplace_back(bits_to_insert_into_this_field);
		start_positions.emplace_back(start_pos);
		numbers_of_bits.emplace_back(num_bits);
		expected_results.emplace_back(result);
	};
	// original bits to start num unsigned
	// bit field insert pos bits result
	// --------------------------------------
	add_check( 0, 1, 0, 1, 1);
	add_check( 0, 1, 1, 1, 2);
	add_check( 0, 1, 2, 1, 4);
	add_check( 0, 1, 3, 1, 8);
	add_check( 0, 1, 5, 1, 32);
	add_check( 0, 1, 11, 1, 2048);
	add_check( 0, 1, 31, 1, (I(1) << 31) );
	if (std::is_same<I, uint64_t>::value) {
		add_check( 0, 1, 63, 1, I(uint64_t{1} << 63));
	}
	add_check(0b1000000001, 0b1110011, 2, 6, 0b1011001101);
	// Note: only inserting 6 bits, even though the bits_to_insert value has
	// a non-zero 7'th bit
	auto num_checks = expected_results.size();
	// NOTE(review): bits_to_insert is passed before original_bit_fields here, the
	// reverse of add_check's parameter order — presumably matching the device-side
	// wrapper's parameter order; verify against device_function_ptrs::replace_bits.
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::replace_bits<I>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		bits_to_insert.data(),
		original_bit_fields.data(),
		start_positions.data(),
		numbers_of_bits.data()
	);
}

// Checks funnel shift right: the 64-bit value (high_word:low_word) shifted right,
// returning the lower 32 bits; shift amounts are capped at the full word size.
TEST_CASE("funnel_shift_right")
{
	using result_type = uint32_t;
	std::vector<result_type> expected_results;
	std::vector<uint32_t> low_words;
	std::vector<uint32_t> high_words;
	std::vector<uint32_t> shift_amounts;
	auto add_check = [&](
		uint32_t low_word, uint32_t high_word, uint32_t shift_amount, result_type result)
	{
		low_words.emplace_back(low_word);
		high_words.emplace_back(high_word);
		shift_amounts.emplace_back(shift_amount);
		expected_results.emplace_back(result);
	};
	// low high shift result
	// word word amount
	// ----------------------------------------------------
	add_check( ~0u, 0u, 0, ~0u );
	add_check( 0xCA7u, 0xDEADBEEFu, 0, 0xCA7u );
	add_check( ~0u, 0u, 5, 0x07FFFFFFu );
	add_check( ~0u, 0b111u, 4, 0x7FFFFFFFu );
	add_check( 0u, 0xDEADBEEFu, 32, 0xDEADBEEFu );
	add_check( 0xCA7u, 0xDEADBEEFu, 32, 0xDEADBEEFu );
	add_check( 0xCA7u << 16, 0xDEADBEEFu, 16, 0xBEEF0CA7u );
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::funnel_shift_right<kat::builtins::funnel_shift_amount_resolution_mode_t::cap_at_full_word_size>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		low_words.data(),
		high_words.data(),
		shift_amounts.data()
	);
}

// Checks funnel shift left: the 64-bit value (high_word:low_word) shifted left,
// returning the upper 32 bits; shift amounts are capped at the full word size.
TEST_CASE("funnel_shift_left")
{
	using result_type = uint32_t;
	std::vector<result_type> expected_results;
	std::vector<uint32_t> low_words;
	std::vector<uint32_t> high_words;
	std::vector<uint32_t> shift_amounts;
	auto add_check = [&](
		uint32_t low_word, uint32_t high_word, uint32_t shift_amount, result_type result)
	{
		low_words.emplace_back(low_word);
		high_words.emplace_back(high_word);
		shift_amounts.emplace_back(shift_amount);
		expected_results.emplace_back(result);
	};
	// low high shift result
	// word word amount
	// ----------------------------------------------------
	add_check( 0u, 0xDEADBEEFu, 0, 0xDEADBEEFu );
	add_check( 0u, 0xDEADBEEFu, 4, 0xEADBEEF0u );
	add_check( 0u, 0xDEADBEEFu, 16, 0xBEEF0000u );
	add_check( 0x0ACEu << 16, 0xDEADBEEFu, 16, 0xBEEF0ACEu );
	add_check( 0xDEADBEEFu, 0u, 32, 0xDEADBEEFu );
	add_check( 0b10u, ~0u, 31, (1 << 31) | 0b1 );
	auto num_checks = expected_results.size();
	execute_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::funnel_shift_left<kat::builtins::funnel_shift_amount_resolution_mode_t::cap_at_full_word_size>{},
		expected_results.data(),
		num_checks,
		make_exact_comparison<result_type>,
		low_words.data(),
		high_words.data(),
		shift_amounts.data()
	);
}

} // TEST_SUITE("uniform non-builtins")

// Builtins whose behavior is not uniform across all grid threads,
// or depends on the behavior/values held by other threads
TEST_SUITE("non-uniform builtins") {

// Each thread should see its own intra-warp lane index; expectations are generated
// host-side in thread order (two warps per block, two blocks).
TEST_CASE("lane_index")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable { return n++ % kat::warp_size; };
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	// NOTE(review): launch_config is constructed but not passed on — presumably the
	// execute helper builds its own from num_grid_blocks and block_size; confirm.
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::lane_index{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

// Mask of lanes strictly below this thread's lane: (1 << lane_index) - 1
TEST_CASE("preceding_lanes_mask")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return (1u << lane_index) - 1;
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::preceding{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

// Mask of lanes at or below this thread's lane
TEST_CASE("preceding_and_self_lanes_mask")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return ((1u << lane_index) - 1) | (1u << lane_index);
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::preceding_and_self{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

// Mask with only this thread's own lane bit set
TEST_CASE("self_lane_mask")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return (1u << lane_index);
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::self{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

// Mask of lanes at or above this thread's lane
TEST_CASE("succeeding_and_self_lanes_mask")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return ~((1u << lane_index) - 1);
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::succeeding_and_self{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

// Mask of lanes strictly above this thread's lane
TEST_CASE("succeeding_lanes_mask")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	// No arguments
	auto generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return ~((1u << lane_index) - 1) & ~(1u << lane_index);
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::succeeding{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>
	);
}

#if (__CUDACC_VER_MAJOR__ >= 9)

// Warp ballot: each lane contributes a predicate bit; the expected result is the
// warp-wide predicate bitmap, restricted to each lane's own participation mask.
TEST_CASE("ballot")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	// NOTE(review): num_grid_blocks is 1 here, unlike the comment below and the
	// sibling testcases which use 2 — so the second, partial-mask "part" only
	// covers the second warp of the single block. TODO confirm intent.
	auto num_grid_blocks { 1 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	std::vector<int> values;
	std::vector<kat::lane_mask_t> lane_masks;
	// Our testcase will have two "parts", in each of the two blocks. In this
	// first block we'll use the full mask; in the second block we'll use
	// two different masks (but not warp-uniformly).
	auto value_generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return lane_index % 2;
	};
	std::generate_n( std::back_inserter(values), num_checks, value_generator );
	auto lane_mask_generator = [&, n = 0] () mutable {
		// First block_size threads: everyone participates
		if (n < block_size) { n++; return kat::lane_mask_t{kat::full_warp_mask}; }
		auto lane_index = n++ % kat::warp_size;
		constexpr const kat::lane_mask_t odd_lanes = 0b0101'0101'0101'0101'0101'0101'0101'0101;
		constexpr const kat::lane_mask_t even_lanes = 0b1010'1010'1010'1010'1010'1010'1010'1010;
		kat::lane_mask_t self_mask = 1 << lane_index;
		// Warp voting, matching etc. instructions typically require each lane to have itself
		// included in the mask of relevant lanes
		return self_mask | ( (lane_index < kat::warp_size / 2) ? odd_lanes : even_lanes );
		// Note: the lane masks don't correspond to which lanes are looking at those lane masks
	};
	std::generate_n( std::back_inserter(lane_masks), num_checks, lane_mask_generator );
	auto result_generator = [&, n = 0] () mutable {
		auto lane_index = n % kat::warp_size;
		auto lane_value = values[n];
		auto lane_mask = lane_masks[n];
		kat::lane_mask_t ballot { 0 };
		// Recompute the full-warp ballot host-side from the per-lane values...
		for(auto i = 0; i < kat::warp_size; i++) {
			if (values[n - lane_index + i]) { ballot |= (1u << i); }
		}
		// ... then restrict it to the lanes this thread considers relevant
		ballot &= lane_mask;
		n++;
		return ballot;
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, result_generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::ballot{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>,
		values.data(),
		lane_masks.data()
	);
}

// Warp "all" vote: expected truthy exactly when every lane in the mask has a set
// predicate bit (ballot == lane_mask, expressed via the xor-with-complement below).
TEST_CASE("all_lanes_satisfy")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	std::vector<int> values;
	std::vector<kat::lane_mask_t> lane_masks;
	// Our testcase will have two "parts", in each of the two blocks. In this
	// first block we'll use the full mask; in the second block we'll use
	// two different masks (but not warp-uniformly).
	auto value_generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return lane_index % 2;
	};
	std::generate_n( std::back_inserter(values), num_checks, value_generator );
	auto lane_mask_generator = [&, n = 0] () mutable {
		if (n < block_size) { n++; return kat::lane_mask_t{kat::full_warp_mask}; }
		auto lane_index = n++ % kat::warp_size;
		constexpr const kat::lane_mask_t odd_lanes = 0b0101'0101'0101'0101'0101'0101'0101'0101;
		constexpr const kat::lane_mask_t even_lanes = 0b1010'1010'1010'1010'1010'1010'1010'1010;
		kat::lane_mask_t self_mask = 1 << lane_index;
		// Warp voting, matching etc. instructions typically require each lane to have itself
		// included in the mask of relevant lanes
		return self_mask | ( (lane_index < kat::warp_size / 2) ? odd_lanes : even_lanes );
		// Note: the lane masks don't correspond to which lanes are looking at those lane masks
	};
	std::generate_n( std::back_inserter(lane_masks), num_checks, lane_mask_generator );
	auto result_generator = [&, n = 0] () mutable {
		auto lane_index = n % kat::warp_size;
		auto lane_value = values[n];
		auto lane_mask = lane_masks[n];
		kat::lane_mask_t ballot { 0 };
		for(auto i = 0; i < kat::warp_size; i++) {
			if (values[n - lane_index + i]) { ballot |= (1 << i); }
		}
		n++;
		// (ballot ^ ~lane_mask) == full mask  <=>  ballot == lane_mask
		return (ballot ^ ~lane_mask) == kat::full_warp_mask;
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, result_generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::all_lanes_satisfy{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>,
		values.data(),
		lane_masks.data()
	);
}

// Warp "any" vote: expected truthy when at least one lane in the mask has a set
// predicate bit.
TEST_CASE("any_lanes_satisfy")
{
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	std::vector<int> values;
	std::vector<kat::lane_mask_t>
	lane_masks;
	// Our testcase will have two "parts", in each of the two blocks. In this
	// first block we'll use the full mask; in the second block we'll use
	// two different masks (but not warp-uniformly).
	auto value_generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return lane_index % 2;
	};
	std::generate_n( std::back_inserter(values), num_checks, value_generator );
	auto lane_mask_generator = [&, n = 0] () mutable {
		if (n < block_size) { n++; return kat::lane_mask_t{kat::full_warp_mask}; }
		auto lane_index = n++ % kat::warp_size;
		constexpr const kat::lane_mask_t odd_lanes = 0b0101'0101'0101'0101'0101'0101'0101'0101;
		constexpr const kat::lane_mask_t even_lanes = 0b1010'1010'1010'1010'1010'1010'1010'1010;
		kat::lane_mask_t self_mask = 1 << lane_index;
		// Warp voting, matching etc. instructions typically require each lane to have itself
		// included in the mask of relevant lanes
		return self_mask | ( (lane_index < kat::warp_size / 2) ? odd_lanes : even_lanes );
		// Note: the lane masks don't correspond to which lanes are looking at those lane masks
	};
	std::generate_n( std::back_inserter(lane_masks), num_checks, lane_mask_generator );
	auto result_generator = [&, n = 0] () mutable {
		auto lane_index = n % kat::warp_size;
		auto lane_value = values[n];
		auto lane_mask = lane_masks[n];
		kat::lane_mask_t ballot { 0 };
		for(auto i = 0; i < kat::warp_size; i++) {
			if (values[n - lane_index + i]) { ballot |= (1 << i); }
		}
		n++;
		// "any": some masked lane has its predicate bit set
		return (ballot & lane_mask) != 0;
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, result_generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::any_lanes_satisfy{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>,
		values.data(),
		lane_masks.data()
	);
}

#if ! defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700

// requires SM70+ at runtime as well — hence the compute-capability guard below
TEST_CASE("all_lanes_agree")
{
	cuda::device_t device { cuda::device::current::get() };
	if (device.properties().compute_capability() < cuda::device::make_compute_capability(7,0)) { return; }
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	std::vector<int> values;
	std::vector<kat::lane_mask_t> lane_masks;
	// Our testcase will have two "parts", in each of the two blocks. In this
	// first block we'll use the full mask; in the second block we'll use
	// two different masks (but not warp-uniformly).
	auto value_generator = [n = 0] () mutable {
		auto lane_index = n++ % kat::warp_size;
		return lane_index % 2;
	};
	std::generate_n( std::back_inserter(values), num_checks, value_generator );
	auto lane_mask_generator = [&, n = 0] () mutable {
		if (n < block_size) { n++; return kat::lane_mask_t{kat::full_warp_mask}; }
		auto lane_index = n++ % kat::warp_size;
		constexpr const kat::lane_mask_t odd_lanes = 0b0101'0101'0101'0101'0101'0101'0101'0101;
		constexpr const kat::lane_mask_t even_lanes = 0b1010'1010'1010'1010'1010'1010'1010'1010;
		kat::lane_mask_t self_mask = 1 << lane_index;
		// Warp voting, matching etc. instructions typically require each lane to have itself
		// included in the mask of relevant lanes
		return self_mask | ( (lane_index < kat::warp_size / 2) ? odd_lanes : even_lanes );
		// Note: the lane masks don't correspond to which lanes are looking at those lane masks
	};
	std::generate_n( std::back_inserter(lane_masks), num_checks, lane_mask_generator );
	auto result_generator = [&, n = 0] () mutable {
		auto lane_index = n % kat::warp_size;
		auto lane_value = values[n];
		auto lane_mask = lane_masks[n];
		kat::lane_mask_t ballot { 0 };
		for(auto i = 0; i < kat::warp_size; i++) {
			if (values[n - lane_index + i]) { ballot |= (1 << i); }
		}
		n++;
		// NOTE(review): this expectation is identical to any_lanes_satisfy's
		// ("some masked lane's predicate is set") — for an "all lanes agree" vote
		// one would expect agreement semantics instead. Looks copy-pasted; verify
		// against the device-side all_lanes_agree implementation.
		return (ballot & lane_mask) != 0;
	};
	std::generate_n( std::back_inserter(expected_results), num_checks, result_generator );
	auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
	execute_non_uniform_builtin_testcase_on_gpu_and_check(
		device_function_ptrs::all_lanes_agree{},
		expected_results.data(),
		num_checks,
		num_grid_blocks,
		block_size,
		make_exact_comparison<result_type>,
		values.data(),
		lane_masks.data()
	);
}

#endif

// Warp match (match.any-style): each lane gets the mask of lanes holding the same
// value as itself, restricted to its participation mask. Requires SM70+ at runtime.
TEST_CASE_TEMPLATE("get_matching_lanes", I, int, unsigned, long, unsigned long, long long, unsigned long long)
{
	cuda::device_t device { cuda::device::current::get() };
	if (device.properties().compute_capability() < cuda::device::make_compute_capability(7,0)) { return; }
	using result_type = uint32_t;
	auto block_size { kat::warp_size * 2 };
	auto num_grid_blocks { 2 };
	auto num_checks = block_size * num_grid_blocks; // one per thread
	std::vector<result_type> expected_results;
	std::vector<I> values;
	std::vector<kat::lane_mask_t> lane_masks;
	// Our testcase will have two "parts", in each of the two blocks. In this
	// first block we'll use the full mask; in the second block we'll use
	// two different masks (but not warp-uniformly).
auto value_generator = [n = 0] () mutable { auto lane_index = n++ % kat::warp_size; return lane_index % 3; }; std::generate_n( std::back_inserter(values), num_checks, value_generator ); auto lane_mask_generator = [&, n = 0] () mutable { if (n < block_size) { n++; return kat::lane_mask_t{kat::full_warp_mask}; } auto lane_index = n++ % kat::warp_size; constexpr const kat::lane_mask_t odd_lanes = 0b0101'0101'0101'0101'0101'0101'0101'0101; constexpr const kat::lane_mask_t even_lanes = 0b1010'1010'1010'1010'1010'1010'1010'1010; kat::lane_mask_t self_mask = 1 << lane_index; // Warp voting, matching etc. instructions typically require each lane to have itself // included in the mask of relevant lanes return self_mask | ( (lane_index < kat::warp_size / 2) ? odd_lanes : even_lanes ); // Note: the lane masks don't correspond to which lanes are looking at those lane masks }; std::generate_n( std::back_inserter(lane_masks), num_checks, lane_mask_generator ); auto result_generator = [&, n = 0] () mutable { auto lane_index = n % kat::warp_size; auto lane_value = values[n]; auto lane_mask = lane_masks[n]; kat::lane_mask_t matches { 0 }; for(auto i = 0; i < kat::warp_size; i++) { if (values[n - lane_index + i] == lane_value) { matches |= (1 << i); } } matches &= lane_mask; n++; return matches; }; std::generate_n( std::back_inserter(expected_results), num_checks, result_generator ); auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) }; execute_non_uniform_builtin_testcase_on_gpu_and_check( device_function_ptrs::succeeding{}, expected_results.data(), num_checks, num_grid_blocks, block_size, make_exact_comparison<result_type>, values.data(), lane_masks.data() ); } // Note: Will not be testing the variants of these for versions of CUDA before 9. #endif // CUDA 9 } // TEST_SUITE("non-uniform builtins") /* The following are a bit tricky to test - just need to check uniformity of results across the block or the grid? 
unsigned special_registers::symmetric_multiprocessor_index(); unsigned long long special_registers::grid_index(); The following are tested indirectly via shared-memory-related tests: unsigned int special_registers::dynamic_shared_memory_size(); unsigned int special_registers::total_shared_memory_size(); */
the_stack
#include <cugraph/legacy/graph.hpp>

#include "vertex_binning.cuh"

namespace cugraph {

namespace mg {

namespace detail {

// Collectively flush each thread's locally-buffered frontier vertices into the
// global output frontier, staging them through shared memory.
//
// Must be invoked by *all* threads of the block at a block-uniform call site
// (the kernels below only call it under conditions that are identical across
// the block), since it uses __syncthreads() and block-shared staging state.
//
// thread_frontier       : this thread's locally collected vertices
// thread_frontier_count : number of valid entries in thread_frontier
// block_frontier        : shared-memory staging buffer for the whole block
// block_frontier_count  : shared counter of vertices staged by the block
// output_frontier       : global array receiving the block's vertices
// block_write_offset    : shared slot holding the block's reserved offset
// output_frontier_count : global counter used to reserve output space
template <typename vertex_t, typename edge_t>
__device__ void write_to_frontier(vertex_t const* thread_frontier,
                                  int thread_frontier_count,
                                  vertex_t* block_frontier,
                                  int* block_frontier_count,
                                  vertex_t* output_frontier,
                                  edge_t* block_write_offset,
                                  edge_t* output_frontier_count)
{
  // Bug fix: wait until every thread is done with the *previous* call's use of
  // block_frontier / block_frontier_count / block_write_offset before touching
  // them again. Without this barrier, a fast thread could reset the counter
  // (and start refilling the staging buffer) while a slow thread is still
  // draining the previous batch to global memory below, dropping or
  // corrupting frontier entries.
  __syncthreads();

  // Set frontier count for block to 0
  if (threadIdx.x == 0) { *block_frontier_count = 0; }
  __syncthreads();

  // Find out where to write the thread frontier to shared memory
  int thread_write_offset = atomicAdd(block_frontier_count, thread_frontier_count);
  for (int i = 0; i < thread_frontier_count; ++i) {
    block_frontier[i + thread_write_offset] = thread_frontier[i];
  }
  __syncthreads();

  // If the total number of frontiers for this block is 0 then return.
  // block_frontier_count is uniform across the block at this point, so the
  // early return is taken by all threads or by none.
  if (*block_frontier_count == 0) { return; }

  // Find out where to write the block frontier to global memory
  if (threadIdx.x == 0) {
    *block_write_offset = cugraph::detail::traversal::atomicAdd(
      output_frontier_count, static_cast<edge_t>(*block_frontier_count));
  }
  __syncthreads();

  // Write block frontier to global memory
  for (int i = threadIdx.x; i < (*block_frontier_count); i += blockDim.x) {
    output_frontier[(*block_write_offset) + i] = block_frontier[i];
  }
}

// Frontier expansion with one thread block per input-frontier vertex. The
// block's threads stride over the vertex's adjacency list; destinations
// accepted by `op` are buffered per thread (at most EdgesPerThread entries
// between flushes) and written out block-cooperatively via write_to_frontier
// every EdgesPerThread iterations and on the last iteration.
//
// offsets / indices     : CSR adjacency of the graph
// input_frontier        : vertices to expand, one per block
// vertex_begin          : offset added to local vertex ids before calling op
// output_frontier       : global array receiving accepted destinations
// output_frontier_count : global counter of entries in output_frontier
template <int BlockSize,
          int EdgesPerThread,
          typename vertex_t,
          typename edge_t,
          typename operator_t>
__global__ void block_per_vertex(edge_t const* offsets,
                                 vertex_t const* indices,
                                 vertex_t const* input_frontier,
                                 vertex_t input_frontier_count,
                                 vertex_t vertex_begin,
                                 vertex_t* output_frontier,
                                 edge_t* output_frontier_count,
                                 operator_t op)
{
  if (blockIdx.x >= input_frontier_count) { return; }

  __shared__ edge_t block_write_offset;
  __shared__ vertex_t block_frontier[BlockSize * EdgesPerThread];
  __shared__ int block_frontier_count;

  vertex_t thread_frontier[EdgesPerThread];
  vertex_t source        = input_frontier[blockIdx.x];
  edge_t beg_edge_offset = offsets[source];
  edge_t end_edge_offset = offsets[source + 1];
  edge_t edge_offset     = threadIdx.x + beg_edge_offset;
  int num_iter = (end_edge_offset - beg_edge_offset + BlockSize - 1) / BlockSize;

  int thread_frontier_count = 0;
  for (int i = 0; i < num_iter; ++i) {
    if (edge_offset < end_edge_offset) {
      vertex_t destination = indices[edge_offset];
      // If operator returns true then add to local frontier
      if (op(source + vertex_begin, destination)) {
        thread_frontier[thread_frontier_count++] = destination;
      }
    }
    bool is_last_iter = (i == (num_iter - 1));
    bool is_nth_iter  = (i % EdgesPerThread == 0);
    // Write to frontier every EdgesPerThread iterations
    // Or if it is the last iteration of the for loop.
    // Both conditions are uniform across the block, as write_to_frontier requires.
    if (is_nth_iter || is_last_iter) {
      write_to_frontier(thread_frontier,
                        thread_frontier_count,
                        block_frontier,
                        &block_frontier_count,
                        output_frontier,
                        &block_write_offset,
                        output_frontier_count);
      thread_frontier_count = 0;
    }
    edge_offset += blockDim.x;
  }
}

// Frontier expansion for the very-high-degree bucket: the *whole grid* works
// on one input vertex at a time. Each block covers a blockDim.x-wide window
// of the vertex's adjacency list, striding by blockDim.x * gridDim.x, with
// the same buffered write-out scheme as block_per_vertex.
template <int BlockSize,
          int EdgesPerThread,
          typename vertex_t,
          typename edge_t,
          typename operator_t>
__global__ void kernel_per_vertex(edge_t const* offsets,
                                  vertex_t const* indices,
                                  vertex_t const* input_frontier,
                                  vertex_t input_frontier_count,
                                  vertex_t vertex_begin,
                                  vertex_t* output_frontier,
                                  edge_t* output_frontier_count,
                                  operator_t op)
{
  vertex_t current_vertex_index = 0;
  __shared__ edge_t block_write_offset;
  __shared__ vertex_t block_frontier[BlockSize * EdgesPerThread];
  __shared__ int block_frontier_count;
  edge_t stride = blockDim.x * gridDim.x;

  vertex_t thread_frontier[EdgesPerThread];
  while (current_vertex_index < input_frontier_count) {
    vertex_t source = input_frontier[current_vertex_index];

    edge_t beg_block_offset = offsets[source] + (blockIdx.x * blockDim.x);
    edge_t end_block_offset = offsets[source + 1];
    int i                     = 0;
    int thread_frontier_count = 0;
    for (edge_t block_offset = beg_block_offset; block_offset < end_block_offset;
         block_offset += stride) {
      if (block_offset + threadIdx.x < end_block_offset) {
        vertex_t destination = indices[block_offset + threadIdx.x];
        if (op(source + vertex_begin, destination)) {
          thread_frontier[thread_frontier_count++] = destination;
        }
      }
      // Flush every EdgesPerThread iterations and on the final window; both
      // conditions are uniform across the block.
      bool is_last_iter = (block_offset + blockDim.x >= end_block_offset);
      bool is_nth_iter  = (i % EdgesPerThread == 0);
      if (is_nth_iter || is_last_iter) {
        write_to_frontier(thread_frontier,
                          thread_frontier_count,
                          block_frontier,
                          &block_frontier_count,
                          output_frontier,
                          &block_write_offset,
                          output_frontier_count);
        thread_frontier_count = 0;
      }
      ++i;
    }
    ++current_vertex_index;
  }
}

// Launch kernel_per_vertex for the largest-degree bucket. The block count is
// scaled with the bucket's minimum degree: 2^(ceilLogDegreeStart - 8) blocks
// of 1024 threads.
template <typename vertex_t, typename edge_t, typename weight_t, typename operator_t>
void large_vertex_lb(cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
                     DegreeBucket<vertex_t, edge_t>& bucket,
                     operator_t op,
                     vertex_t vertex_begin,
                     vertex_t* output_vertex_ids,
                     edge_t* output_vertex_ids_offset,
                     cudaStream_t stream)
{
  if (bucket.numberOfVertices != 0) {
    const int block_size = 1024;
    int block_count      = (1 << (bucket.ceilLogDegreeStart - 8));
    kernel_per_vertex<block_size, 2>
      <<<block_count, block_size, 0, stream>>>(graph.offsets,
                                               graph.indices,
                                               bucket.vertexIds,
                                               bucket.numberOfVertices,
                                               vertex_begin,
                                               output_vertex_ids,
                                               output_vertex_ids_offset,
                                               op);
    CHECK_CUDA(stream);
  }
}

// Launch block_per_vertex (one block per vertex) for the medium-degree bucket.
template <typename vertex_t, typename edge_t, typename weight_t, typename operator_t>
void medium_vertex_lb(cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
                      DegreeBucket<vertex_t, edge_t>& bucket,
                      operator_t op,
                      vertex_t vertex_begin,
                      vertex_t* output_vertex_ids,
                      edge_t* output_vertex_ids_offset,
                      cudaStream_t stream)
{
  // Vertices with degrees 2^12 <= d < 2^16 are handled by this kernel
  // Block size of 1024 is chosen to reduce wasted threads for a vertex
  const int block_size = 1024;
  int block_count      = bucket.numberOfVertices;
  if (block_count != 0) {
    block_per_vertex<block_size, 2>
      <<<block_count, block_size, 0, stream>>>(graph.offsets,
                                               graph.indices,
                                               bucket.vertexIds,
                                               bucket.numberOfVertices,
                                               vertex_begin,
                                               output_vertex_ids,
                                               output_vertex_ids_offset,
                                               op);
    CHECK_CUDA(stream);
  }
}

// Launch block_per_vertex for the low-degree buckets, with a block size
// matched to the bucket's degree range.
template <typename vertex_t, typename edge_t, typename weight_t, typename operator_t>
void small_vertex_lb(cugraph::legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
                     DegreeBucket<vertex_t, edge_t>& bucket,
                     operator_t op,
                     vertex_t vertex_begin,
                     vertex_t* output_vertex_ids,
                     edge_t* output_vertex_ids_offset,
                     cudaStream_t stream)
{
  int block_count = bucket.numberOfVertices;
  if (block_count == 0) { return; }

  // For vertices with degree <= 32 block size of 32 is chosen
  // For all vertices with degree d such that 2^x <= d < 2^x+1
  // the block size is chosen to be 2^x. This is done so that
  // vertices with degrees 1.5*2^x are also handled in a load
  // balanced way
  int block_size = 512;
  if (bucket.ceilLogDegreeEnd < 6) {
    block_size = 32;
    block_per_vertex<32, 8><<<block_count, block_size, 0, stream>>>(graph.offsets,
                                                                    graph.indices,
                                                                    bucket.vertexIds,
                                                                    bucket.numberOfVertices,
                                                                    vertex_begin,
                                                                    output_vertex_ids,
                                                                    output_vertex_ids_offset,
                                                                    op);
  } else if (bucket.ceilLogDegreeEnd < 8) {
    block_size = 64;
    block_per_vertex<64, 8><<<block_count, block_size, 0, stream>>>(graph.offsets,
                                                                    graph.indices,
                                                                    bucket.vertexIds,
                                                                    bucket.numberOfVertices,
                                                                    vertex_begin,
                                                                    output_vertex_ids,
                                                                    output_vertex_ids_offset,
                                                                    op);
  } else if (bucket.ceilLogDegreeEnd < 10) {
    block_size = 128;
    block_per_vertex<128, 8><<<block_count, block_size, 0, stream>>>(graph.offsets,
                                                                     graph.indices,
                                                                     bucket.vertexIds,
                                                                     bucket.numberOfVertices,
                                                                     vertex_begin,
                                                                     output_vertex_ids,
                                                                     output_vertex_ids_offset,
                                                                     op);
  } else if (bucket.ceilLogDegreeEnd < 12) {
    block_size = 512;
    block_per_vertex<512, 4><<<block_count, block_size, 0, stream>>>(graph.offsets,
                                                                     graph.indices,
                                                                     bucket.vertexIds,
                                                                     bucket.numberOfVertices,
                                                                     vertex_begin,
                                                                     output_vertex_ids,
                                                                     output_vertex_ids_offset,
                                                                     op);
  } else {
    block_size = 512;
    block_per_vertex<512, 4><<<block_count, block_size, 0, stream>>>(graph.offsets,
                                                                     graph.indices,
                                                                     bucket.vertexIds,
                                                                     bucket.numberOfVertices,
                                                                     vertex_begin,
                                                                     output_vertex_ids,
                                                                     output_vertex_ids_offset,
                                                                     op);
  }
  CHECK_CUDA(stream);
}

}  // namespace detail

}  // namespace mg

}  // namespace cugraph
the_stack
#ifdef __cplusplus
extern "C" {
#endif

// Number of cell topology types: 2^8 possible occupancy states of a cell's 8 corners.
__constant__ int T = 256;

// Topology types treated as acceptable: 23 "upper" cases plus the empty cell,
// and their 255-complements ("bottom") plus the full cell.
// NOTE(review): not referenced by the kernels in this file; presumably used
// elsewhere in this extension - confirm.
__constant__ int acceptTopology[48] = {1, 2, 3, 4, 6, 8, 9, 12, 15, 16, 17, 32, 34, 48, 51, 63, 64, 68, 96, 102, 111, 119, 127, 0, // upper
                                       254, 253, 252, 251, 249, 247, 246, 243, 240, 239, 238, 223, 221, 207, 204, 192, 191, 187, 159, 153, 144, 136, 128, 255}; // bottom

// each row denotes a topology type
// each column denotes one of the vertex of a cell
// 2^8 = 256
// Row r holds the 8 bits of r, least-significant bit first. The kernels below
// use entry [r][v] to pick p_occ[bit][v]: the raw occupancy value when the bit
// is 0, and (1 - occupancy) when the bit is 1.
__constant__ int occTable[256][8] = {
    // rows 0-15
    {0,0,0,0,0,0,0,0}, {1,0,0,0,0,0,0,0}, {0,1,0,0,0,0,0,0}, {1,1,0,0,0,0,0,0},
    {0,0,1,0,0,0,0,0}, {1,0,1,0,0,0,0,0}, {0,1,1,0,0,0,0,0}, {1,1,1,0,0,0,0,0},
    {0,0,0,1,0,0,0,0}, {1,0,0,1,0,0,0,0}, {0,1,0,1,0,0,0,0}, {1,1,0,1,0,0,0,0},
    {0,0,1,1,0,0,0,0}, {1,0,1,1,0,0,0,0}, {0,1,1,1,0,0,0,0}, {1,1,1,1,0,0,0,0},
    // rows 16-31
    {0,0,0,0,1,0,0,0}, {1,0,0,0,1,0,0,0}, {0,1,0,0,1,0,0,0}, {1,1,0,0,1,0,0,0},
    {0,0,1,0,1,0,0,0}, {1,0,1,0,1,0,0,0}, {0,1,1,0,1,0,0,0}, {1,1,1,0,1,0,0,0},
    {0,0,0,1,1,0,0,0}, {1,0,0,1,1,0,0,0}, {0,1,0,1,1,0,0,0}, {1,1,0,1,1,0,0,0},
    {0,0,1,1,1,0,0,0}, {1,0,1,1,1,0,0,0}, {0,1,1,1,1,0,0,0}, {1,1,1,1,1,0,0,0},
    // rows 32-47
    {0,0,0,0,0,1,0,0}, {1,0,0,0,0,1,0,0}, {0,1,0,0,0,1,0,0}, {1,1,0,0,0,1,0,0},
    {0,0,1,0,0,1,0,0}, {1,0,1,0,0,1,0,0}, {0,1,1,0,0,1,0,0}, {1,1,1,0,0,1,0,0},
    {0,0,0,1,0,1,0,0}, {1,0,0,1,0,1,0,0}, {0,1,0,1,0,1,0,0}, {1,1,0,1,0,1,0,0},
    {0,0,1,1,0,1,0,0}, {1,0,1,1,0,1,0,0}, {0,1,1,1,0,1,0,0}, {1,1,1,1,0,1,0,0},
    // rows 48-63
    {0,0,0,0,1,1,0,0}, {1,0,0,0,1,1,0,0}, {0,1,0,0,1,1,0,0}, {1,1,0,0,1,1,0,0},
    {0,0,1,0,1,1,0,0}, {1,0,1,0,1,1,0,0}, {0,1,1,0,1,1,0,0}, {1,1,1,0,1,1,0,0},
    {0,0,0,1,1,1,0,0}, {1,0,0,1,1,1,0,0}, {0,1,0,1,1,1,0,0}, {1,1,0,1,1,1,0,0},
    {0,0,1,1,1,1,0,0}, {1,0,1,1,1,1,0,0}, {0,1,1,1,1,1,0,0}, {1,1,1,1,1,1,0,0},
    // rows 64-79
    {0,0,0,0,0,0,1,0}, {1,0,0,0,0,0,1,0}, {0,1,0,0,0,0,1,0}, {1,1,0,0,0,0,1,0},
    {0,0,1,0,0,0,1,0}, {1,0,1,0,0,0,1,0}, {0,1,1,0,0,0,1,0}, {1,1,1,0,0,0,1,0},
    {0,0,0,1,0,0,1,0}, {1,0,0,1,0,0,1,0}, {0,1,0,1,0,0,1,0}, {1,1,0,1,0,0,1,0},
    {0,0,1,1,0,0,1,0}, {1,0,1,1,0,0,1,0}, {0,1,1,1,0,0,1,0}, {1,1,1,1,0,0,1,0},
    // rows 80-95
    {0,0,0,0,1,0,1,0}, {1,0,0,0,1,0,1,0}, {0,1,0,0,1,0,1,0}, {1,1,0,0,1,0,1,0},
    {0,0,1,0,1,0,1,0}, {1,0,1,0,1,0,1,0}, {0,1,1,0,1,0,1,0}, {1,1,1,0,1,0,1,0},
    {0,0,0,1,1,0,1,0}, {1,0,0,1,1,0,1,0}, {0,1,0,1,1,0,1,0}, {1,1,0,1,1,0,1,0},
    {0,0,1,1,1,0,1,0}, {1,0,1,1,1,0,1,0}, {0,1,1,1,1,0,1,0}, {1,1,1,1,1,0,1,0},
    // rows 96-111
    {0,0,0,0,0,1,1,0}, {1,0,0,0,0,1,1,0}, {0,1,0,0,0,1,1,0}, {1,1,0,0,0,1,1,0},
    {0,0,1,0,0,1,1,0}, {1,0,1,0,0,1,1,0}, {0,1,1,0,0,1,1,0}, {1,1,1,0,0,1,1,0},
    {0,0,0,1,0,1,1,0}, {1,0,0,1,0,1,1,0}, {0,1,0,1,0,1,1,0}, {1,1,0,1,0,1,1,0},
    {0,0,1,1,0,1,1,0}, {1,0,1,1,0,1,1,0}, {0,1,1,1,0,1,1,0}, {1,1,1,1,0,1,1,0},
    // rows 112-127
    {0,0,0,0,1,1,1,0}, {1,0,0,0,1,1,1,0}, {0,1,0,0,1,1,1,0}, {1,1,0,0,1,1,1,0},
    {0,0,1,0,1,1,1,0}, {1,0,1,0,1,1,1,0}, {0,1,1,0,1,1,1,0}, {1,1,1,0,1,1,1,0},
    {0,0,0,1,1,1,1,0}, {1,0,0,1,1,1,1,0}, {0,1,0,1,1,1,1,0}, {1,1,0,1,1,1,1,0},
    {0,0,1,1,1,1,1,0}, {1,0,1,1,1,1,1,0}, {0,1,1,1,1,1,1,0}, {1,1,1,1,1,1,1,0},
    // rows 128-143
    {0,0,0,0,0,0,0,1}, {1,0,0,0,0,0,0,1}, {0,1,0,0,0,0,0,1}, {1,1,0,0,0,0,0,1},
    {0,0,1,0,0,0,0,1}, {1,0,1,0,0,0,0,1}, {0,1,1,0,0,0,0,1}, {1,1,1,0,0,0,0,1},
    {0,0,0,1,0,0,0,1}, {1,0,0,1,0,0,0,1}, {0,1,0,1,0,0,0,1}, {1,1,0,1,0,0,0,1},
    {0,0,1,1,0,0,0,1}, {1,0,1,1,0,0,0,1}, {0,1,1,1,0,0,0,1}, {1,1,1,1,0,0,0,1},
    // rows 144-159
    {0,0,0,0,1,0,0,1}, {1,0,0,0,1,0,0,1}, {0,1,0,0,1,0,0,1}, {1,1,0,0,1,0,0,1},
    {0,0,1,0,1,0,0,1}, {1,0,1,0,1,0,0,1}, {0,1,1,0,1,0,0,1}, {1,1,1,0,1,0,0,1},
    {0,0,0,1,1,0,0,1}, {1,0,0,1,1,0,0,1}, {0,1,0,1,1,0,0,1}, {1,1,0,1,1,0,0,1},
    {0,0,1,1,1,0,0,1}, {1,0,1,1,1,0,0,1}, {0,1,1,1,1,0,0,1}, {1,1,1,1,1,0,0,1},
    // rows 160-175
    {0,0,0,0,0,1,0,1}, {1,0,0,0,0,1,0,1}, {0,1,0,0,0,1,0,1}, {1,1,0,0,0,1,0,1},
    {0,0,1,0,0,1,0,1}, {1,0,1,0,0,1,0,1}, {0,1,1,0,0,1,0,1}, {1,1,1,0,0,1,0,1},
    {0,0,0,1,0,1,0,1}, {1,0,0,1,0,1,0,1}, {0,1,0,1,0,1,0,1}, {1,1,0,1,0,1,0,1},
    {0,0,1,1,0,1,0,1}, {1,0,1,1,0,1,0,1}, {0,1,1,1,0,1,0,1}, {1,1,1,1,0,1,0,1},
    // rows 176-191
    {0,0,0,0,1,1,0,1}, {1,0,0,0,1,1,0,1}, {0,1,0,0,1,1,0,1}, {1,1,0,0,1,1,0,1},
    {0,0,1,0,1,1,0,1}, {1,0,1,0,1,1,0,1}, {0,1,1,0,1,1,0,1}, {1,1,1,0,1,1,0,1},
    {0,0,0,1,1,1,0,1}, {1,0,0,1,1,1,0,1}, {0,1,0,1,1,1,0,1}, {1,1,0,1,1,1,0,1},
    {0,0,1,1,1,1,0,1}, {1,0,1,1,1,1,0,1}, {0,1,1,1,1,1,0,1}, {1,1,1,1,1,1,0,1},
    // rows 192-207
    {0,0,0,0,0,0,1,1}, {1,0,0,0,0,0,1,1}, {0,1,0,0,0,0,1,1}, {1,1,0,0,0,0,1,1},
    {0,0,1,0,0,0,1,1}, {1,0,1,0,0,0,1,1}, {0,1,1,0,0,0,1,1}, {1,1,1,0,0,0,1,1},
    {0,0,0,1,0,0,1,1}, {1,0,0,1,0,0,1,1}, {0,1,0,1,0,0,1,1}, {1,1,0,1,0,0,1,1},
    {0,0,1,1,0,0,1,1}, {1,0,1,1,0,0,1,1}, {0,1,1,1,0,0,1,1}, {1,1,1,1,0,0,1,1},
    // rows 208-223
    {0,0,0,0,1,0,1,1}, {1,0,0,0,1,0,1,1}, {0,1,0,0,1,0,1,1}, {1,1,0,0,1,0,1,1},
    {0,0,1,0,1,0,1,1}, {1,0,1,0,1,0,1,1}, {0,1,1,0,1,0,1,1}, {1,1,1,0,1,0,1,1},
    {0,0,0,1,1,0,1,1}, {1,0,0,1,1,0,1,1}, {0,1,0,1,1,0,1,1}, {1,1,0,1,1,0,1,1},
    {0,0,1,1,1,0,1,1}, {1,0,1,1,1,0,1,1}, {0,1,1,1,1,0,1,1}, {1,1,1,1,1,0,1,1},
    // rows 224-239
    {0,0,0,0,0,1,1,1}, {1,0,0,0,0,1,1,1}, {0,1,0,0,0,1,1,1}, {1,1,0,0,0,1,1,1},
    {0,0,1,0,0,1,1,1}, {1,0,1,0,0,1,1,1}, {0,1,1,0,0,1,1,1}, {1,1,1,0,0,1,1,1},
    {0,0,0,1,0,1,1,1}, {1,0,0,1,0,1,1,1}, {0,1,0,1,0,1,1,1}, {1,1,0,1,0,1,1,1},
    {0,0,1,1,0,1,1,1}, {1,0,1,1,0,1,1,1}, {0,1,1,1,0,1,1,1}, {1,1,1,1,0,1,1,1},
    // rows 240-255
    {0,0,0,0,1,1,1,1}, {1,0,0,0,1,1,1,1}, {0,1,0,0,1,1,1,1}, {1,1,0,0,1,1,1,1},
    {0,0,1,0,1,1,1,1}, {1,0,1,0,1,1,1,1}, {0,1,1,0,1,1,1,1}, {1,1,1,0,1,1,1,1},
    {0,0,0,1,1,1,1,1}, {1,0,0,1,1,1,1,1}, {0,1,0,1,1,1,1,1}, {1,1,0,1,1,1,1,1},
    {0,0,1,1,1,1,1,1}, {1,0,1,1,1,1,1,1}, {0,1,1,1,1,1,1,1}, {1,1,1,1,1,1,1,1}};

// (dx, dy, dz) offsets of a cell's 8 corners relative to the cell's base
// index (i, j, k), in the vertex ordering used by occTable's columns.
__constant__ int vertexTable[8][3]={
    {0, 1, 0},
    {1, 1, 0},
    {1, 0, 0},
    {0, 0, 0},
    {0, 1, 1},
    {1, 1, 1},
    {1, 0, 1},
    {0, 0, 1} };

/**
 * convert the topology probabilities from the occupancy
 * parallel over every cell and every topology
 *
 * Launch shape: one block per cell (gridDim = W x H x D cells), one thread per
 * topology type (blockDim.x == T == 256).
 *
 * occupancy : flattened (W+1) x (H+1) x (D+1) per-corner probabilities
 * topology  : flattened (W*H*D) x T output; entry [cell][t] is the product
 *             over the cell's 8 corners of p or (1-p), as selected by occTable[t]
 */
__global__ void occupancy_to_topology_kernel(const float *occupancy, float *topology){
    // int W = gridDim.x;
    int H = gridDim.y;
    int D = gridDim.z;
    // cell coordinates and topology index for this thread
    int i = blockIdx.x;
    int j = blockIdx.y;
    int k = blockIdx.z;
    int t = threadIdx.x;
    // return probabilities of all 256 topologies
    int topology_ind = t;

    // p_occ[0][v] = probability from the occupancy grid at corner v,
    // p_occ[1][v] = its complement
    float p_occ[2][8];
    for (int v=0; v<8; v++){
        p_occ[0][v] = occupancy[ (i+vertexTable[v][0])*(H+1)*(D+1) + (j+vertexTable[v][1])*(D+1) + k+vertexTable[v][2] ];
        p_occ[1][v] = 1-p_occ[0][v];
    }
    // probability of this topology = product over the 8 corners
    float p_accumu = 1.0;
    for (int v=0; v<8; v++){
        p_accumu = p_accumu*p_occ[occTable[topology_ind][v]][v];
    }
    topology[ (i*H*D+j*D+k)*T + t ] = p_accumu;
}

/**
 * propagate the gradient from the topology probabilities to occupancy status
 * parallel over every cell and every topology
 *
 * Same launch shape as the forward kernel. Each thread handles one
 * (cell, topology) pair; gradients are scattered to the 8 corner entries of
 * grad_occupancy with atomicAdd, since corners are shared between neighboring
 * cells and between all 256 topology threads of a cell.
 */
__global__ void grad_occupancy_to_topology_kernel(const float *grad_output, const float *occupancy, float *topology, float *grad_occupancy){
    // int W = gridDim.x;
    int H = gridDim.y;
    int D = gridDim.z;
    int i = blockIdx.x;
    int j = blockIdx.y;
    int k = blockIdx.z;
    int t = threadIdx.x;
    // return probabilities of all 256 topologies
    int topology_ind = t;

    float p_occ[2][8];
    for (int v=0; v<8; v++){
        p_occ[0][v] = occupancy[ (i+vertexTable[v][0])*(H+1)*(D+1) + (j+vertexTable[v][1])*(D+1) + k+vertexTable[v][2] ];
        p_occ[1][v] = 1-p_occ[0][v];
    }
    //float p_accumu = topology[ (i*H+j)*T + t ];

    // incoming gradient for this (cell, topology) pair
    float grad_accumu = grad_output[ (i*H*D+j*D+k)*T + t ];

    // propagate the gradient to the eight occupancy corners
    // (d/dp of p is +1; d/dp of (1-p) is -1, hence the sign)
    float sign;
    for (int v=0; v<8; v++){
        if (occTable[topology_ind][v]==0){
            sign=1.0;
        }else{
            sign=-1.0;
        }
        // re-calculate the probability excluding the current vertex
        // didn't use p_accumu/p_occ[occTable[t][v]][v] for numerical stability
        // TODO: find a better solution
        float p_accumu = 1.0;
        for (int v_=0; v_<8; v_++){
            if (v_==v) continue;
            p_accumu = p_accumu*p_occ[occTable[topology_ind][v_]][v_];
        }
        atomicAdd(&grad_occupancy[ (i+vertexTable[v][0])*(H+1)*(D+1) + (j+vertexTable[v][1])*(D+1) + k+vertexTable[v][2] ], sign*grad_accumu*p_accumu );
    }
}

/*
 * Forward function, compute the topology probability given the occupancy probability
 * params:
 *    state
input, THCState
 *    occupancy  input, (W+1)x(H+1)x(D+1) per-corner occupancy probabilities
 *    topology   output, probability of every topology type we care about, (WxHxD)xT
 */
void occupancy_to_topology_kernel_forward( THCState *state, THCudaTensor *occupancy, THCudaTensor *topology ){
    // The occupancy grid is sampled at cell corners, so there is one cell
    // fewer than corners along each axis.
    const int num_cells_x = THCudaTensor_size(state, occupancy, 0) - 1;
    const int num_cells_y = THCudaTensor_size(state, occupancy, 1) - 1;
    const int num_cells_z = THCudaTensor_size(state, occupancy, 2) - 1;
    const int num_topologies = THCudaTensor_size(state, topology, 1);

    // One block per cell, one thread per topology type.
    const dim3 grid_dim(num_cells_x, num_cells_y, num_cells_z);
    const dim3 block_dim(num_topologies, 1, 1);

    // launch the kernel on the current THC stream
    occupancy_to_topology_kernel<<< grid_dim, block_dim, 0, THCState_getCurrentStream(state) >>>(
        THCudaTensor_data(state, occupancy),
        THCudaTensor_data(state, topology) );
}

/*
 * Backward function, backpropagate the gradient from topology to occupancy
 * params:
 *    state           input, THCState
 *    grad_output     input, gradient on the topology probability, (WxHxD)xT
 *    occupancy       input, (W+1)x(H+1)x(D+1)
 *    topology        input, probability of every topology type we care about, (WxHxD)xT
 *    grad_occupancy  output, gradient on the occupancy map, (W+1)x(H+1)x(D+1)
 */
void occupancy_to_topology_kernel_backward( THCState *state, THCudaTensor *grad_output, THCudaTensor *occupancy, THCudaTensor *topology, THCudaTensor *grad_occupancy ){
    const int num_cells_x = THCudaTensor_size(state, occupancy, 0) - 1;
    const int num_cells_y = THCudaTensor_size(state, occupancy, 1) - 1;
    const int num_cells_z = THCudaTensor_size(state, occupancy, 2) - 1;
    const int num_topologies = THCudaTensor_size(state, topology, 1);

    // Mirror the forward launch: one block per cell, one thread per topology.
    const dim3 grid_dim(num_cells_x, num_cells_y, num_cells_z);
    const dim3 block_dim(num_topologies, 1, 1);

    // launch the kernel on the current THC stream
    grad_occupancy_to_topology_kernel<<< grid_dim, block_dim, 0, THCState_getCurrentStream(state) >>>(
        THCudaTensor_data(state, grad_output),
        THCudaTensor_data(state, occupancy),
        THCudaTensor_data(state, topology),
        THCudaTensor_data(state, grad_occupancy) );
}

#ifdef __cplusplus
}
#endif
the_stack
#include <stdio.h>

#include "cuda_common.hpp"
#include "acc_runtime.hpp"

//NOTE: HIP will call the corresponding CUDA function if compiled with CUDA support

/* Repack between the local "z-stick" layout (columns of length size_z) and
   the all-to-all exchange buffer, whose per-rank chunks are sized by that
   rank's local z-size. direction == 1 packs sticks into the a2a buffer;
   direction == -1 unpacks.
   Launch: x covers iz (sized for the largest per-rank z-size),
   y = column index, z = rank index. */
template <int direction>
__global__ void repack_z_buffer_gpu_kernel(int size_z,
                                           int num_zcol_loc,
                                           int const* local_z_offsets,
                                           int const* local_z_sizes,
                                           acc_complex_double_t* z_sticks_local,
                                           acc_complex_double_t* a2a_buffer)
{
    int iz = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;
    int izcol = hipBlockIdx_y;
    int rank = hipBlockIdx_z;

    int local_zsize = local_z_sizes[rank];
    // The x-dimension is sized for the largest rank; ranks with a smaller
    // local z-size leave the excess threads idle.
    if (iz < local_zsize) {
        int offs = local_z_offsets[rank];
        if (direction == -1) {
            z_sticks_local[offs + iz + izcol * size_z] = a2a_buffer[offs * num_zcol_loc + izcol * local_zsize + iz];
        }
        if (direction == 1) {
            a2a_buffer[offs * num_zcol_loc + izcol * local_zsize + iz] = z_sticks_local[offs + iz + izcol * size_z];
        }
    }
}

/* Host wrapper for repack_z_buffer_gpu_kernel; dispatches on direction at
   runtime and launches on the default stream (stream 0). */
extern "C" void repack_z_buffer_gpu(int direction,
                                    int num_ranks,
                                    int size_z,
                                    int num_zcol_loc,
                                    int zcol_max_size,
                                    int const* local_z_offsets,
                                    int const* local_z_sizes,
                                    acc_complex_double_t* z_sticks_local,
                                    acc_complex_double_t* a2a_buffer)
{
    dim3 grid_t(64);
    dim3 grid_b(num_blocks(zcol_max_size, grid_t.x), num_zcol_loc, num_ranks);

    if (direction == 1) {
        accLaunchKernel((repack_z_buffer_gpu_kernel<1>), dim3(grid_b), dim3(grid_t), 0, 0,
            size_z, num_zcol_loc, local_z_offsets, local_z_sizes, z_sticks_local, a2a_buffer
        );
    } else {
        accLaunchKernel((repack_z_buffer_gpu_kernel<-1>), dim3(grid_b), dim3(grid_t), 0, 0,
            size_z, num_zcol_loc, local_z_offsets, local_z_sizes, z_sticks_local, a2a_buffer
        );
    }
}

/* Scatter plane-wave coefficients into the FFT buffer:
   fft_buffer[map[idx]] = data[idx], independently for each of the gridDim.y
   stored functions. The buffer is zeroed by the host wrapper first, so
   unmapped entries stay zero. */
__global__ void batch_load_gpu_kernel(int fft_size,
                                      int num_pw_components,
                                      int const* map,
                                      acc_complex_double_t const* data,
                                      acc_complex_double_t* fft_buffer)
{
    int i = hipBlockIdx_y;
    int idx = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;

    if (idx < num_pw_components) {
        fft_buffer[array2D_offset(map[idx], i, fft_size)] = data[array2D_offset(idx, i, num_pw_components)];
    }
}

/* Host wrapper: zero the whole fft_buffer (fft_size x num_fft), then scatter
   the num_fft sets of plane-wave coefficients into it on the given stream. */
extern "C" void batch_load_gpu(int fft_size,
                               int num_pw_components,
                               int num_fft,
                               int const* map,
                               acc_complex_double_t const* data,
                               acc_complex_double_t* fft_buffer,
                               int stream_id__)
{
    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_pw_components, grid_t.x), num_fft);

    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    acc::zero(fft_buffer, fft_size*num_fft, stream_id(stream_id__));

    accLaunchKernel((batch_load_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, stream,
        fft_size, num_pw_components, map, data, fft_buffer
    );
}

/* Per-element AXPY-style gather from the FFT buffer:
   data = alpha * data + beta * fft_buffer[map], independently for each of
   the gridDim.y stored functions. */
__global__ void batch_unload_gpu_kernel(int fft_size,
                                        int num_pw_components,
                                        int const* map,
                                        acc_complex_double_t const* fft_buffer,
                                        acc_complex_double_t* data,
                                        double alpha,
                                        double beta)
{
    int i = hipBlockIdx_y;
    int idx = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;

    if (idx < num_pw_components) {
        acc_complex_double_t z1 = data[array2D_offset(idx, i, num_pw_components)];
        acc_complex_double_t z2 = fft_buffer[array2D_offset(map[idx], i, fft_size)];
        data[array2D_offset(idx, i, num_pw_components)] = make_accDoubleComplex(alpha * z1.x + beta * z2.x, alpha * z1.y + beta * z2.y);
        //data[array2D_offset(idx, i, num_pw_components)] = cuCadd(
        //    cuCmul(make_cuDoubleComplex(alpha, 0), data[array2D_offset(idx, i, num_pw_components)]),
        //    cuCmul(make_cuDoubleComplex(beta, 0), fft_buffer[array2D_offset(map[idx], i, fft_size)]));
    }
}

/// Unload data from FFT buffer.
/** The following operation is executed:
 *  data[ig] = alpha * data[ig] + beta * fft_buffer[map[ig]]
 *
 *  independently for each of the num_fft functions stored in data / fft_buffer,
 *  asynchronously on the stream identified by stream_id__.
 */
extern "C" void batch_unload_gpu(int fft_size,
                                 int num_pw_components,
                                 int num_fft,
                                 int const* map,
                                 acc_complex_double_t const* fft_buffer,
                                 acc_complex_double_t* data,
                                 double alpha,
                                 double beta,
                                 int stream_id__)
{
    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_pw_components, grid_t.x), num_fft);

    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    if (alpha == 0) {
        /* The kernel still reads data[] (as alpha * z1), so it must not hold
           garbage (0 * NaN == NaN). Bug fix: clear the full
           num_pw_components x num_fft array - previously only the first
           num_pw_components elements were zeroed, leaving the remaining
           num_fft - 1 slices reading potentially uninitialized memory. */
        acc::zero(data, num_pw_components * num_fft, stream_id(stream_id__));
    }

    accLaunchKernel((batch_unload_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, stream,
        fft_size, num_pw_components, map, fft_buffer, data, alpha, beta
    );
}

/* Load one z-column into the FFT buffer, storing the complex conjugate of the
   input data. Judging by the name, this handles the x = 0, y = 0 column
   (used for FFTs of real functions) - confirm with the callers. */
__global__ void load_x0y0_col_gpu_kernel(int z_col_size,
                                         int const* map,
                                         acc_complex_double_t const* data,
                                         acc_complex_double_t* fft_buffer)
{
    int idx = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;

    if (idx < z_col_size) {
        fft_buffer[map[idx]] = make_accDoubleComplex(data[idx].x, -data[idx].y);
    }
}

/* Host wrapper for load_x0y0_col_gpu_kernel; launches on the given stream. */
extern "C" void load_x0y0_col_gpu(int z_col_size,
                                  int const* map,
                                  acc_complex_double_t const* data,
                                  acc_complex_double_t* fft_buffer,
                                  int stream_id__)
{
    dim3 grid_t(64);
    dim3 grid_b(num_blocks(z_col_size, grid_t.x));

    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    accLaunchKernel((load_x0y0_col_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, stream,
        z_col_size, map, data, fft_buffer
    );
}

/* Move z-columns between the packed representation (size_z x num_z_cols) and
   the full FFT buffer (size_x * size_y * size_z), placing column icol at the
   xy-position z_col_pos__[icol].
   direction == 1 : packed -> buffer, optionally storing the conjugate
   (used for the columns at the {-x, -y} coordinates of real-function FFTs).
   direction == -1: buffer -> packed.
   Launch: x covers the columns, y covers the z index. */
template <int direction, bool conjugate>
__global__ void pack_unpack_z_cols_gpu_kernel(acc_complex_double_t* z_cols_packed__,
                                              acc_complex_double_t* fft_buf__,
                                              int size_x__,
                                              int size_y__,
                                              int size_z__,
                                              int num_z_cols__,
                                              int const* z_col_pos__)
{
    int icol = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
    int iz = hipBlockIdx_y;
    int size_xy = size_x__ * size_y__;
    if (icol < num_z_cols__) {
        int ipos = z_col_pos__[icol];
        /* load into buffer */
        if (direction == 1) {
            if (conjugate) {
                fft_buf__[array2D_offset(ipos, iz, size_xy)].x = z_cols_packed__[array2D_offset(iz, icol, size_z__)].x;
                fft_buf__[array2D_offset(ipos, iz, size_xy)].y = -z_cols_packed__[array2D_offset(iz, icol, size_z__)].y;
            } else {
                fft_buf__[array2D_offset(ipos, iz, size_xy)] = z_cols_packed__[array2D_offset(iz, icol, size_z__)];
            }
        }
        if (direction == -1) {
            z_cols_packed__[array2D_offset(iz, icol, size_z__)] = fft_buf__[array2D_offset(ipos, iz, size_xy)];
        }
    }
}

/* Unpack z-columns into a zeroed FFT buffer. With use_reduction__, the
   conjugates of columns 1..num_z_cols-1 are additionally written to the
   {-x, -y} positions (second half of z_col_pos__), skipping the first
   (x = 0, y = 0) column. */
extern "C" void unpack_z_cols_gpu(acc_complex_double_t* z_cols_packed__,
                                  acc_complex_double_t* fft_buf__,
                                  int size_x__,
                                  int size_y__,
                                  int size_z__,
                                  int num_z_cols__,
                                  int const* z_col_pos__,
                                  bool use_reduction__,
                                  int stream_id__)
{
    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__);

    acc::zero(fft_buf__, size_x__ * size_y__ * size_z__, stream_id(stream_id__));

    accLaunchKernel((pack_unpack_z_cols_gpu_kernel<1, false>), dim3(grid_b), dim3(grid_t), 0, stream,
        z_cols_packed__, fft_buf__, size_x__, size_y__, size_z__, num_z_cols__, z_col_pos__
    );
    if (use_reduction__) {
        accLaunchKernel((pack_unpack_z_cols_gpu_kernel<1, true>), dim3(grid_b), dim3(grid_t), 0, stream,
            &z_cols_packed__[size_z__], // skip first column for {-x, -y} coordinates
            fft_buf__, size_x__, size_y__, size_z__, num_z_cols__ - 1,
            &z_col_pos__[num_z_cols__ + 1] // skip first column for {-x, -y} coordinates
        );
    }
}

/* Pack z-columns out of the FFT buffer into the packed representation. */
extern "C" void pack_z_cols_gpu(acc_complex_double_t* z_cols_packed__,
                                acc_complex_double_t* fft_buf__,
                                int size_x__,
                                int size_y__,
                                int size_z__,
                                int num_z_cols__,
                                int const* z_col_pos__,
                                int stream_id__)
{
    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__);

    accLaunchKernel((pack_unpack_z_cols_gpu_kernel<-1, false>), dim3(grid_b), dim3(grid_t), 0, stream,
        z_cols_packed__, fft_buf__, size_x__, size_y__, size_z__, num_z_cols__, z_col_pos__
    );
}

/* Variant of pack_unpack_z_cols_gpu_kernel that processes two real functions
   at once, stored as z1 + i*z2 in the complex FFT buffer.
   direction == 1 : combine the two packed columns into one buffer entry
   (z1 + i*z2, or conj(z1) + i*conj(z2) when conjugate is set).
   direction == -1: separate the buffer values at the {x, y} and {-x, -y}
   positions back into the two packed columns, using the standard
   real/imaginary splitting identities. */
template <int direction, bool conjugate>
__global__ void pack_unpack_two_z_cols_gpu_kernel(acc_complex_double_t* z_cols_packed1__,
                                                  acc_complex_double_t* z_cols_packed2__,
                                                  acc_complex_double_t* fft_buf__,
                                                  int size_x__,
                                                  int size_y__,
                                                  int size_z__,
                                                  int num_z_cols__,
                                                  int const* z_col_pos__)
{
    int icol = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
    int iz = hipBlockIdx_y;
    int size_xy = size_x__ * size_y__;
    if (icol < num_z_cols__) {
        /* load into buffer */
        if (direction == 1) {
            int ipos = z_col_pos__[icol];
            acc_complex_double_t z1 = z_cols_packed1__[array2D_offset(iz, icol, size_z__)];
            acc_complex_double_t z2 = z_cols_packed2__[array2D_offset(iz, icol, size_z__)];
            if (conjugate) {
                /* conj(z1) + I * conj(z2) */
                fft_buf__[array2D_offset(ipos, iz, size_xy)] = make_accDoubleComplex(z1.x + z2.y, z2.x - z1.y);
            } else {
                /* z1 + I * z2 */
                fft_buf__[array2D_offset(ipos, iz, size_xy)] = make_accDoubleComplex(z1.x - z2.y, z1.y + z2.x);
            }
        }
        if (direction == -1) {
            int ipos1 = z_col_pos__[icol];
            int ipos2 = z_col_pos__[num_z_cols__ + icol];
            /* {x, y} and {-x, -y} entries of the combined transform */
            acc_complex_double_t z1 = fft_buf__[array2D_offset(ipos1, iz, size_xy)];
            acc_complex_double_t z2 = fft_buf__[array2D_offset(ipos2, iz, size_xy)];
            z_cols_packed1__[array2D_offset(iz, icol, size_z__)] = make_accDoubleComplex(0.5 * (z1.x + z2.x), 0.5 * (z1.y - z2.y));
            z_cols_packed2__[array2D_offset(iz, icol, size_z__)] = make_accDoubleComplex(0.5 * (z1.y + z2.y), 0.5 * (z2.x - z1.x));
        }
    }
}

/* Unpack two real functions' z-columns into a single (non-zeroed here) FFT
   buffer: direct columns first, then the conjugated columns at the {-x, -y}
   positions, skipping the first (x = 0, y = 0) column. */
extern "C" void unpack_z_cols_2_gpu(acc_complex_double_t* z_cols_packed1__,
                                    acc_complex_double_t* z_cols_packed2__,
                                    acc_complex_double_t* fft_buf__,
                                    int size_x__,
                                    int size_y__,
                                    int size_z__,
                                    int num_z_cols__,
                                    int const* z_col_pos__,
                                    int stream_id__)
{
    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__);

    acc::zero(fft_buf__, size_x__ * size_y__ * size_z__, stream_id(stream_id__));

    accLaunchKernel((pack_unpack_two_z_cols_gpu_kernel<1, false>), dim3(grid_b), dim3(grid_t), 0, stream,
        z_cols_packed1__, z_cols_packed2__, fft_buf__, size_x__, size_y__, size_z__, num_z_cols__, z_col_pos__
    );
    accLaunchKernel((pack_unpack_two_z_cols_gpu_kernel<1, true>), dim3(grid_b), dim3(grid_t), 0, stream,
        &z_cols_packed1__[size_z__], // skip first column for {-x, -y} coordinates
        &z_cols_packed2__[size_z__], // skip first column for {-x, -y} coordinates
        fft_buf__, size_x__, size_y__, size_z__, num_z_cols__ - 1,
        &z_col_pos__[num_z_cols__ + 1] // skip first column for {-x, -y} coordinates
    );
}

/* Pack two real functions' z-columns out of the combined FFT buffer. */
extern "C" void pack_z_cols_2_gpu(acc_complex_double_t* z_cols_packed1__,
                                  acc_complex_double_t* z_cols_packed2__,
                                  acc_complex_double_t* fft_buf__,
                                  int size_x__,
                                  int size_y__,
                                  int size_z__,
                                  int num_z_cols__,
                                  int const* z_col_pos__,
                                  int stream_id__)
{
    acc_stream_t stream = (acc_stream_t) acc::stream(stream_id(stream_id__));

    dim3 grid_t(64);
    dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__);

    accLaunchKernel((pack_unpack_two_z_cols_gpu_kernel<-1, false>), dim3(grid_b), dim3(grid_t), 0, stream,
        z_cols_packed1__, z_cols_packed2__, fft_buf__, size_x__, size_y__, size_z__, num_z_cols__, z_col_pos__
    );
}
the_stack
#include <cuda.h>

// Thrust Dependencies
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>

// Octree-SLAM Dependencies
#include <octree_slam/sensor/localization_kernels.h>

namespace octree_slam {

namespace sensor {

__device__ const float DIST_THRESH = 0.1f; //Use 10 cm distance threshold for correspondences
__device__ const float NORM_THRESH = 0.87f; //Use 30 degree orientation threshold

//Define structure to be used for combined Mat6x6 and Vec6 in thrust summation.
//values[0..35] hold the row-major 6x6 normal matrix A, values[36..41] hold the vector b.
struct Mat6x7 {
  float values[42];
  __host__ __device__ Mat6x7() {};
  __host__ __device__ Mat6x7(const int val) {
    for (int i = 0; i < 42; i++) {
      values[i] = val;
    }
  };
};

//Element-wise sum so Mat6x7 can be accumulated with thrust::reduce
__host__ __device__ inline Mat6x7 operator+(const Mat6x7& lhs, const Mat6x7& rhs) {
  Mat6x7 result;
  for (int i = 0; i < 42; i++) {
    result.values[i] = lhs.values[i] + rhs.values[i];
  }
  return result;
}

ICPFrame::ICPFrame(const int w, const int h) : width(w), height(h) {
  cudaMalloc((void**)&vertex, width*height*sizeof(glm::vec3));
  cudaMalloc((void**)&normal, width*height*sizeof(glm::vec3));
}

ICPFrame::~ICPFrame() {
  cudaFree(vertex);
  cudaFree(normal);
}

RGBDFrame::RGBDFrame(const int w, const int h) : width(w), height(h) {
  cudaMalloc((void**)&intensity, width*height*sizeof(float));
  cudaMalloc((void**)&vertex, width*height*sizeof(glm::vec3));
}

RGBDFrame::~RGBDFrame() {
  cudaFree(intensity);
  cudaFree(vertex);
}

//Flags each pixel as a valid correspondence (finite points/normals, close in position
//and orientation) and decrements *num_corr for every rejected pixel.
//num_corr must be initialized by the caller to num_points.
__global__ void computeICPCorrespondences(const glm::vec3* last_frame_vertex, const glm::vec3* last_frame_normal,
                                          const glm::vec3* this_frame_vertex, const glm::vec3* this_frame_normal,
                                          const int num_points, bool* stencil, int* num_corr) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

  //Don't do anything if the index is out of bounds
  if (idx >= num_points) {
    return;
  }

  bool is_match = true;

  //Check whether points are any good
  if (!isfinite(this_frame_vertex[idx].x) || !isfinite(this_frame_vertex[idx].y) || !isfinite(this_frame_vertex[idx].z)
      || !isfinite(last_frame_vertex[idx].x) || !isfinite(last_frame_vertex[idx].y) || !isfinite(last_frame_vertex[idx].z)) {
    is_match = false;
  }
  if (!is_match || !isfinite(this_frame_normal[idx].x) || !isfinite(this_frame_normal[idx].y) || !isfinite(this_frame_normal[idx].z)
      || !isfinite(last_frame_normal[idx].x) || !isfinite(last_frame_normal[idx].y) || !isfinite(last_frame_normal[idx].z)) {
    is_match = false;
  }

  //Check position difference
  if (!is_match || glm::length(this_frame_vertex[idx] - last_frame_vertex[idx]) > DIST_THRESH) {
    is_match = false;
  }

  //Check normal difference
  if (!is_match || glm::dot(this_frame_normal[idx], last_frame_normal[idx]) < NORM_THRESH) {
    is_match = false;
  }

  //Update result
  stencil[idx] = is_match;

  //Subtract from global counter if its not a match
  if (!is_match) {
    atomicAdd(num_corr, -1);
  }
}

//Accumulates point-to-plane ICP normal equations (A, b) over load_size correspondences
//per thread. As must have ceil(num_points/load_size) elements; each thread writes
//exactly one (initialized) partial sum.
__global__ void computeICPCostsKernel(const glm::vec3* last_frame_normal, const glm::vec3* last_frame_vertex,
                                      const glm::vec3* this_frame_vertex, const int num_points, const int load_size,
                                      Mat6x7* As) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

  //Don't do anything if the index is out of bounds
  if (idx*load_size >= num_points) {
    return;
  }

  //Determine whether the full load is in the bounds
  int bound = load_size;
  if ((idx + 1)*load_size - num_points > 0) {
    bound -= (idx + 1)*load_size - num_points;
  }

  //Init outputs
  for (int i = 0; i < 7; i++) {
    for (int j = 0; j < 6; j++) {
      As[idx].values[6 * i + j] = 0.0f;
    }
  }

  //Loop through the load
  for (int k = 0; k < bound; k++) {
    //Get the vertex and normal values
    glm::vec3 v2 = this_frame_vertex[load_size*idx+k];
    glm::vec3 v1 = last_frame_vertex[load_size*idx+k];
    glm::vec3 n = last_frame_normal[load_size*idx+k];

    //Construct A_T = G^T * n with G = [skew-like(v2) | I]
    float G_T[18] = { 0.0f, -v2.x, -v2.y, -v2.z, 0.0f, v2.x, v2.y, v2.z, 0.0f,
                      1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f };
    float A_T[6];
    for (int i = 0; i < 6; i++) {
      A_T[i] = G_T[3 * i] * n.x + G_T[3*i + 1] * n.y + G_T[3*i + 2] * n.z;
    }

    //Construct b (point-to-plane residual)
    float b = glm::dot(n, v1 - v2);

    //Compute outputs: A += A_T * A_T^T, b-part += b * A_T
    for (int i = 0; i < 6; i++) {
      for (int j = 0; j < 6; j++) {
        As[idx].values[6*i + j] += A_T[i] * A_T[j];
      }
    }
    for (int i = 0; i < 6; i++) {
      As[idx].values[36 + i] += b*A_T[i];
    }
  }
}

//Same accumulation as computeICPCostsKernel, but performs the correspondence tests
//inline (projective association by index) instead of using a precomputed stencil.
__global__ void computeICPCostsUncorrespondedKernel(const glm::vec3* last_frame_normal, const glm::vec3* last_frame_vertex,
                                                    const glm::vec3* this_frame_normal, const glm::vec3* this_frame_vertex,
                                                    const int num_points, const int load_size, Mat6x7* As) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

  //Don't do anything if the index is out of bounds
  if (idx*load_size >= num_points) {
    return;
  }

  //Determine whether the full load is in the bounds
  int bound = load_size;
  if ((idx + 1)*load_size - num_points > 0) {
    bound -= (idx + 1)*load_size - num_points;
  }

  //Init outputs
  for (int i = 0; i < 7; i++) {
    for (int j = 0; j < 6; j++) {
      As[idx].values[6 * i + j] = 0.0f;
    }
  }

  //Loop through the load
  for (int k = 0; k < bound; k++) {
    //Get the vertex and normal values
    glm::vec3 v2 = this_frame_vertex[load_size*idx+k];
    glm::vec3 n2 = this_frame_normal[load_size*idx+k];
    glm::vec3 v1 = last_frame_vertex[load_size*idx+k];
    glm::vec3 n1 = last_frame_normal[load_size*idx+k];

    //Check whether points are any good (finite and inside a 0.1m..10m depth band)
    if (!isfinite(v2.x) || !isfinite(v2.y) || !isfinite(v2.z)
        || !isfinite(v1.x) || !isfinite(v1.y) || !isfinite(v1.z)
        || (v1.z < 0.1f) || (v2.z < 0.1f) || (v1.z > 10.0f) || (v2.z > 10.0f)) {
      continue;
    }
    if (!isfinite(n2.x) || !isfinite(n2.y) || !isfinite(n2.z)
        || !isfinite(n1.x) || !isfinite(n1.y) || !isfinite(n1.z)) {
      continue;
    }

    //Check position difference
    if (glm::length(v2 - v1) > DIST_THRESH) {
      continue;
    }

    //Check normal difference
    if (glm::dot(n2, n1) < NORM_THRESH) {
      continue;
    }

    //Construct A_T
    float G_T[18] = { 0.0f, -v2.x, -v2.y, -v2.z, 0.0f, v2.x, v2.y, v2.z, 0.0f,
                      1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f };
    float A_T[6];
    for (int i = 0; i < 6; i++) {
      A_T[i] = G_T[3 * i] * n1.x + G_T[3 * i + 1] * n1.y + G_T[3 * i + 2] * n1.z;
    }

    //Construct b
    float b = glm::dot(n1, v1 - v2);

    //Compute outputs
    for (int i = 0; i < 6; i++) {
      for (int j = 0; j < 6; j++) {
        As[idx].values[6 * i + j] += A_T[i] * A_T[j];
      }
    }
    for (int i = 0; i < 6; i++) {
      As[idx].values[36+i] += b*A_T[i];
    }
  }
}

//Builds the 6x6 system A and 6-vector b for point-to-plane ICP between two frames.
//A and b are left untouched when no valid correspondences are found.
extern "C" void computeICPCost(const ICPFrame* last_frame, const ICPFrame &this_frame, float* A, float* b) {
  //TODO: Verify that the two frames are the same size

  //Compute correspondences
  int num_correspondences = this_frame.width * this_frame.height;
  bool* d_stencil;
  int* d_num_corr;
  cudaMalloc((void**)&d_stencil, num_correspondences * sizeof(bool));
  cudaMalloc((void**)&d_num_corr, sizeof(int));
  //Initialize to the total points. Assume that most points will be valid
  cudaMemcpy(d_num_corr, &num_correspondences, sizeof(int), cudaMemcpyHostToDevice);
  computeICPCorrespondences<<<ceil((float)num_correspondences / 256.0f), 256>>>(last_frame->vertex, last_frame->normal,
      this_frame.vertex, this_frame.normal, num_correspondences, d_stencil, d_num_corr);
  cudaDeviceSynchronize();

  //Copy number of correspondences back from the device
  cudaMemcpy(&num_correspondences, d_num_corr, sizeof(int), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  cudaFree(d_num_corr);

  //Don't continue without any correspondences
  if (num_correspondences <= 0) {
    //FIX: d_stencil was leaked on this early-return path
    cudaFree(d_stencil);
    return;
  }

  //Allocate memory for reduced copies
  glm::vec3* last_frame_reduced_vertex;
  cudaMalloc((void**)&last_frame_reduced_vertex, num_correspondences * sizeof(glm::vec3));
  glm::vec3* last_frame_reduced_normal;
  cudaMalloc((void**)&last_frame_reduced_normal, num_correspondences * sizeof(glm::vec3));
  glm::vec3* this_frame_reduced_vertex;
  cudaMalloc((void**)&this_frame_reduced_vertex, num_correspondences * sizeof(glm::vec3));

  //Reduce inputs with thrust compaction
  thrust::device_ptr<glm::vec3> in, out;
  thrust::device_ptr<bool> sten = thrust::device_pointer_cast<bool>(d_stencil);
  in = thrust::device_pointer_cast<glm::vec3>(last_frame->vertex);
  out = thrust::device_pointer_cast<glm::vec3>(last_frame_reduced_vertex);
  thrust::copy_if(in, in + last_frame->width*last_frame->height, sten, out, thrust::identity<bool>());
  in = thrust::device_pointer_cast<glm::vec3>(last_frame->normal);
  out = thrust::device_pointer_cast<glm::vec3>(last_frame_reduced_normal);
  thrust::copy_if(in, in + last_frame->width*last_frame->height, sten, out, thrust::identity<bool>());
  in = thrust::device_pointer_cast<glm::vec3>(this_frame.vertex);
  out = thrust::device_pointer_cast<glm::vec3>(this_frame_reduced_vertex);
  thrust::copy_if(in, in + last_frame->width*last_frame->height, sten, out, thrust::identity<bool>());

  //Free device memory from data in the compaction stages
  cudaFree(d_stencil);

  //Compute cost terms
  int load_size = 10;
  //FIX: allocate (and later reduce over) ceil(num/load_size) partial sums; the kernel's
  //last thread handles the partial tail chunk, and the previous floor() allocation made
  //that thread write one element out of bounds while its contribution was dropped.
  int num_chunks = (num_correspondences + load_size - 1) / load_size;
  Mat6x7* d_A; //Note, the 6x6 A and 6x1 b are combined into a single array so they can be reduced together with thrust later
  cudaMalloc((void**) &d_A, num_chunks * sizeof(Mat6x7));
  computeICPCostsKernel<<<ceil((float)num_correspondences / 16.0f / (float)load_size), 16>>>(last_frame_reduced_normal,
      last_frame_reduced_vertex, this_frame_reduced_vertex, num_correspondences, load_size, d_A);
  cudaDeviceSynchronize();

  //Free up device memory
  cudaFree(last_frame_reduced_vertex);
  cudaFree(last_frame_reduced_normal);
  cudaFree(this_frame_reduced_vertex);

  //Sum terms (reduce) with thrust
  thrust::device_ptr<Mat6x7> thrust_A = thrust::device_pointer_cast<Mat6x7>(d_A);
  Mat6x7 matA = thrust::reduce(thrust_A, thrust_A + num_chunks);

  //Free up device memory
  cudaFree(d_A);

  //Copy result to output
  memcpy(A, matA.values, 36 * sizeof(float));
  memcpy(b, matA.values + 36, 6 * sizeof(float));
}

//Variant of computeICPCost that skips the explicit correspondence/compaction pass and
//lets the kernel reject invalid pairs inline.
extern "C" void computeICPCost2(const ICPFrame* last_frame, const ICPFrame &this_frame, float* A, float* b) {
  //TODO: Verify that the two frames are the same size

  //Assume all are correspondences
  int num_correspondences = this_frame.width * this_frame.height;

  //Compute cost terms
  int load_size = 20*this_frame.width/640;
  //FIX: integer division yields load_size == 0 for widths < 32, which previously
  //caused a division by zero below
  if (load_size < 1) {
    load_size = 1;
  }
  //FIX: ceil() sizing for the same out-of-bounds/dropped-tail issue as computeICPCost
  int num_chunks = (num_correspondences + load_size - 1) / load_size;
  Mat6x7* d_A; //Note, the 6x6 A and 6x1 b are combined into a single array so they can be reduced together with thrust later
  cudaMalloc((void**)&d_A, num_chunks * sizeof(Mat6x7));
  computeICPCostsUncorrespondedKernel << <(num_correspondences / 16 / load_size) + 1, 16 >> >(last_frame->normal,
      last_frame->vertex, this_frame.normal, this_frame.vertex, num_correspondences, load_size, d_A);
  cudaDeviceSynchronize();

  //Sum terms (reduce) with thrust
  thrust::device_ptr<Mat6x7> thrust_A = thrust::device_pointer_cast<Mat6x7>(d_A);
  Mat6x7 matA = thrust::reduce(thrust_A, thrust_A + num_chunks);

  //Free up device memory
  cudaFree(d_A);

  //Copy result to output
  memcpy(A, matA.values, 36 * sizeof(float));
  memcpy(b, matA.values + 36 , 6 * sizeof(float));
}

//Placeholder: RGB-D photometric cost is not implemented yet.
extern "C" void computeRGBDCost(const RGBDFrame* last_frame, const RGBDFrame& this_frame, float* A, float* b) {
  //TODO: Stuff here
  cudaDeviceSynchronize();
}

} // namespace sensor

} // namespace octree_slam
the_stack
using namespace std;

typedef uint8_t uint8;
typedef unsigned int uint32;
typedef unsigned long long int uint64;

#define STREAM_BLOCK 16
#define BLOCK_SIZE 32
#define BLOCK_D_SIZE 64
#define INTEGRAL_BLOCK_SIZE 8
#define XDIM_MAX_THREADS 1024
#define XDIM_H_THREADS 512
#define XDIM_Q_THREADS 256
#define SHARED_MEMORY 49152
#define INIT_BLOCK 8

// Fixed 5x5-window SAD matching-cost kernel.
// Launch: block = XDIM_Q_THREADS threads, grid.y = one row band per image row;
// dynamic shared memory = 5*(XDIM_Q_THREADS+ndisp)*sizeof(float) (five right-image rows).
// cost is a [ndisp x rows x cols] volume.
__global__ void SAD(float* left, float* right, float* cost, int rows, int cols, int ndisp){

    const int shift = blockIdx.x*5;   // overlap consecutive blocks by the window width
    const int Row = blockIdx.y;
    const int Col = blockIdx.x*blockDim.x + threadIdx.x - shift;
    const int wc = 2;                 // window center offset of the 5x5 window

    float l_im_0, l_im_1, l_im_2, l_im_3, l_im_4;

    __shared__ float abs_diff[XDIM_Q_THREADS];
    extern __shared__ float r_im_sm[];

    // All blocks but the first stage ndisp extra right-image columns to their left
    // so every disparity can be evaluated.
    int threaddispl = 0;
    if(blockIdx.x > 0){
        threaddispl = ndisp;
    }

    if( Col < cols ){
        // Cache the five left-image rows of this column in registers.
        l_im_0 = left[(Row)*cols+Col ];
        l_im_1 = left[(Row+1)*cols+Col ];
        l_im_2 = left[(Row+2)*cols+Col ];
        l_im_3 = left[(Row+3)*cols+Col ];
        l_im_4 = left[(Row+4)*cols+Col];

        // Stage the corresponding right-image pixels in shared memory.
        #pragma unroll
        for(int wh=0; wh<5; wh++){
            r_im_sm[wh*(XDIM_Q_THREADS+ndisp)+(threaddispl+ threadIdx.x)] = right[(Row+wh)*cols+Col];
        }
    }

    // Stage the ndisp columns to the left of this block's span.
    float rp = ceil( (float)ndisp/blockDim.x );
    for(int b=0; b<rp; b++){
        if(blockIdx.x > 0 && threadIdx.x < ndisp && (int)(Col-(ndisp-b*blockDim.x)) >= 0 ){
            #pragma unroll
            for(int wh=0; wh<5; wh++){
                r_im_sm[wh*(XDIM_Q_THREADS+ndisp)+(threadIdx.x+b*blockDim.x)] = right[(Row+wh)*cols+(Col -(ndisp-b*blockDim.x))];
            }
        }
    }
    // FIX: a barrier is required between staging r_im_sm and reading other threads'
    // entries in the disparity loop below; it was missing.
    __syncthreads();

    for(int d=0; d < ndisp; d++){

        // Vertical (5-row) absolute-difference sum for disparity d.
        float ab_dif = 0;
        if((int)(threaddispl+ threadIdx.x-d) >= 0){
            ab_dif += abs( l_im_0 - r_im_sm[threaddispl+ threadIdx.x-d] );
            ab_dif += abs( l_im_1 - r_im_sm[ (XDIM_Q_THREADS+ndisp)+ (threaddispl+ threadIdx.x-d)] );
            ab_dif += abs( l_im_2 - r_im_sm[2*(XDIM_Q_THREADS+ndisp)+(threaddispl+ threadIdx.x-d)] );
            ab_dif += abs( l_im_3 - r_im_sm[3*(XDIM_Q_THREADS+ndisp)+(threaddispl+ threadIdx.x-d)] );
            ab_dif += abs( l_im_4 - r_im_sm[4*(XDIM_Q_THREADS+ndisp)+(threaddispl+ threadIdx.x-d)] );
            abs_diff[threadIdx.x] = ab_dif;
        }
        __syncthreads();

        // Horizontal aggregation over the 5-wide window.
        float sadcost = 6375;   // sentinel: maximum possible 5x5 SAD (25*255)
        if(Col < cols-5 && Col-d >= 0 && threadIdx.x < XDIM_Q_THREADS-5){
            sadcost = ab_dif+abs_diff[threadIdx.x+1]+abs_diff[threadIdx.x+2]+abs_diff[threadIdx.x+3]+abs_diff[threadIdx.x+4]+abs_diff[threadIdx.x+5];
        }
        // FIX: this barrier previously sat inside the divergent `if(Col < cols-5)`
        // branch, which is undefined behaviour; every thread must reach it before
        // abs_diff is overwritten in the next iteration.
        __syncthreads();

        if(Col < cols-5 && threadIdx.x < XDIM_Q_THREADS-5){
            cost[d*rows*cols+(Row+wc)*cols + (Col+wc)] = sadcost;
        }
    }
}

// Print command-line help.
void usage(void){
    std::cout << "SAD fixed window CUDA implementation" << std::endl;
    std::cout << "Arguments" << std::endl;
    std::cout << "-l:\t\t Left image | File containing names of the left images" << std::endl;
    std::cout << "-r:\t\t Right image | File containing the names of the right images" << std::endl;
    std::cout << "-ndisp:\t\t Number of Disparities" << std::endl;
    std::cout << "-dopost:\t Default false. If set, activates sgm cost optimization" << std::endl;
    std::cout << "-list:\t\t Default is single file. If set, left and right files should be lists of images." << std::endl;
    std::cout << "-out:\t\t Output directory for disparity images." << std::endl;
    std::cout << "-out_type:\t Output image type. Supports pgm|pfm|png|disp(uint16 png format)." << std::endl;
    std::cout << "-postconf:\t Optional configuration file for post-processing." << std::endl;
    std::cout << "-h:\t\t Prints this help" << std::endl;
}

// Entry point: parse arguments, allocate host/device buffers once, then run the
// SAD (and optional SGM post-processing) pipeline over every image pair.
int main(int argc, char* argv[]){

    string leftfile;
    string rightfile;
    string out=string(".");
    string out_t=string("disp");
    int ndisp=256;
    bool post=false;
    bool single=true;
    int argsassigned = 0;
    int required=0;
    int wsize=5;

    postparams params;

    //sgm params
    params.pi1=750;
    params.pi2=6000;
    params.tau_so=1;
    params.alpha1=2;
    params.sgm_q1=3;
    params.sgm_q2=2;
    params.alpha2=6;
    params.sigma = 5.99;
    params.kernel_size=5;

    int direction =-1;

    for(int i=0; i<argc; i++){
        if( !strcmp(argv[i], "-l") ){
            leftfile = string(argv[++i]);
            argsassigned++;
            required++;
        }else if( !strcmp(argv[i],"-r") ){
            rightfile = string(argv[++i]);
            argsassigned++;
            required++;
        }else if( !strcmp(argv[i],"-ndisp") ){
            ndisp= atoi(argv[++i]);
            argsassigned++;
            required++;
        }else if( !strcmp(argv[i], "-dopost") ){
            post= true;
            argsassigned++;
        }else if(!strcmp(argv[i],"-list")){
            single=false;
            argsassigned++;
        }else if(!strcmp(argv[i],"-out")){
            out=string(argv[++i]);
            argsassigned++;
        }else if(!strcmp(argv[i],"-out_type")){
            out_t=string(argv[++i]);
            argsassigned++;
        }else if(!strcmp(argv[i],"-postconf")){
            parseConf(params ,string(argv[++i]));
            argsassigned++;
        }else if(!strcmp(argv[i],"-h")){
            usage();
            return 0;
        }
    }

    if(argsassigned == 0){
        usage();
        return 0;
    }
    if(argsassigned ==1){
        // No explicit files: fall back to the default image lists.
        leftfile = string("../../leftimg.txt");
        rightfile = string("../../rightimg.txt");
    } else if( required < 3 ){
        usage();
        return 0;
    }

    std::vector<string> limg;
    std::vector<string> rimg;
    if (single){
        limg.push_back(leftfile);
        rimg.push_back(rightfile);
    }else{
        limg = getImages(leftfile);
        rimg = getImages(rightfile);
    }

    imgio* imgutil = new imgio();
    imgutil->read_image_meta(limg[0].c_str());

    //######################### Allocate memory on the device ###########################################//
    float* imgl;
    size_t ibytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float);
    cudaMallocHost( (void**) &imgl, ibytes );
    float* imgr;
    cudaMallocHost( (void**) &imgr, ibytes );

    int width = imgutil->getWidth();
    int height = imgutil->getHeight();
    int wdiv = ceil((float)width/32);

    cudaStream_t stream1;
    cudaStream_t stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    float* cost_d;
    size_t bytes = height*width*ndisp*sizeof(float);
    cudaMalloc( (void**) &cost_d, bytes );
    float* post_cost_d;
    cudaMalloc( (void**) &post_cost_d, bytes );

    float* disp_h;
    size_t dbytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float);
    cudaMallocHost( (void**) &disp_h, dbytes );
    float * disp_d;
    cudaMalloc(&disp_d, dbytes);
    float * disp_tmp;
    cudaMalloc(&disp_tmp, dbytes);

    float* imgl_d;
    cudaMalloc(&imgl_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float));
    float* imgr_d;
    cudaMalloc(&imgr_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float));

    int size1 = height*ndisp;
    int size2 = width*ndisp;
    dim3 argGridSGM1((size1 - 1) / ndisp + 1,width);
    dim3 argGridSGM2((size2 - 1) / ndisp + 1,height);

    float * tmp_d;
    cudaMalloc(&tmp_d, width*ndisp*sizeof(float));
    cudaMemsetAsync(tmp_d,0 , width*ndisp*sizeof(float),0);

    float* left_cross;
    cudaMalloc(&left_cross, 4*height*width*sizeof(float));
    cudaMemsetAsync(left_cross,0 , 4*height*width*sizeof(float),0);
    float* right_cross;
    cudaMalloc(&right_cross, 4*height*width*sizeof(float));
    cudaMemsetAsync(right_cross,0 , 4*height*width*sizeof(float),0);

    // Gaussian kernel for the mean2d post-processing step.
    // NOTE(review): the (i-1)-kr / (j-1)-kr offsets look off-center (expected i-kr);
    // kept as-is to preserve the original filter response — confirm with the authors.
    int kr = ceil(params.sigma*3);
    int ks = kr*2+1;
    float * kernel = (float*)calloc(ks*ks,sizeof(float));
    for (int i=0; i<ks; i++){
        for(int j=0; j<ks; j++){
            int y= (i-1)-kr;
            int x= (j-1)-kr;
            kernel[i*ks+j] = exp( -(x*x+y*y)/(2*params.sigma*params.sigma) );
        }
    }

    float *kernel_d;
    cudaMalloc(&kernel_d, ks*ks*sizeof(float));
    cudaMemcpy( kernel_d, kernel, ks*ks*sizeof(float), cudaMemcpyHostToDevice);

    dim3 swapBlock(BLOCK_D_SIZE,16,1);
    dim3 swapGrid(ceil((float)imgutil->getWidth()*imgutil->getHeight()/BLOCK_D_SIZE),ceil((float) ndisp/BLOCK_D_SIZE ));
    dim3 argBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 argGrid(ceil((float) imgutil->getWidth() / BLOCK_SIZE),ceil( (float)imgutil->getHeight()/ BLOCK_SIZE));
    dim3 dimBlockSAD(XDIM_Q_THREADS);
    dim3 dimGridSAD(ceil((float) imgutil->getWidth() / XDIM_Q_THREADS),imgutil->getHeight()-wsize);
    //###################################################################################################################################//

    for(size_t i=0; i<limg.size(); i++){

        imgutil->read_image(limg[i],imgl);
        imgutil->read_image(rimg[i],imgr);

        cudaMemsetAsync(cost_d,0 , height*width*ndisp*sizeof(float),stream1);
        cudaMemsetAsync(post_cost_d,0 , width*height*ndisp*sizeof(float),stream2);

        cudaMemcpyAsync( imgl_d, imgl, width*height*sizeof(float), cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync( imgr_d, imgr, width*height*sizeof(float), cudaMemcpyHostToDevice,stream2);

        SAD<<<dimGridSAD, dimBlockSAD,5*(XDIM_Q_THREADS+ndisp)*sizeof(float)>>>( imgl_d, imgr_d,cost_d,height,width,ndisp);

        if(post){
            // SGM cost optimization along the four scan directions.
            swap_axis<<< swapGrid, swapBlock >>>( cost_d, post_cost_d,height,width,ndisp );
            cudaMemset(cost_d,0 , height*width*ndisp*sizeof(float));

            for (int step = 0; step < width; step++) {
                sgm_loop<0><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < width; step++) {
                sgm_loop<1><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < height; step++) {
                sgm_loop<2><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }
            for (int step = 0; step < height; step++) {
                sgm_loop<3><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d,
                    params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step);
            }

            argmin<<<argGrid, argBlock>>>( disp_d, cost_d, height, width,ndisp );
            subpixel_enchancement<<<(height*width - 1) / TB + 1, TB>>>( disp_d, cost_d, disp_tmp, height*width, height*width, ndisp);
            median2d<<<(height*width - 1) / TB + 1, TB>>>( disp_tmp, disp_d, height*width, height, width, params.kernel_size / 2);
            mean2d<<<(height*width - 1) / TB + 1, TB>>>( disp_d, kernel_d, disp_tmp, height*width, ks / 2, height, width, params.alpha2);
        }else{
            argmin_d<<<argGrid, argBlock>>>( disp_tmp, cost_d, height, width,ndisp );
        }

        cudaMemcpy( disp_h, disp_tmp, height*width*sizeof(float), cudaMemcpyDeviceToHost );

        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(err));

        imgutil->write_image(out + string("/") +limg[i].substr(limg[i].find_last_of("/")+1) ,disp_h,out_t);
    }

    // Cleanup.
    // FIX: imgl and imgr were previously passed to cudaFreeHost twice (double free);
    // each pinned buffer is now freed exactly once.
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaFree(left_cross);
    cudaFree(right_cross);
    cudaFree(tmp_d);
    cudaFreeHost(imgl);
    cudaFreeHost(imgr);
    cudaFreeHost(disp_h);
    cudaFree(disp_d);
    cudaFree(disp_tmp);
    cudaFree(imgl_d);
    cudaFree(imgr_d);
    cudaFree(cost_d);
    cudaFree(post_cost_d);
    // FIX: the Gaussian kernel buffers were leaked.
    cudaFree(kernel_d);
    free(kernel);
    delete imgutil;
}
the_stack
// #include <torch/torch.h>
#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (assumed != old);
    // FIX: a duplicated "} while (assumed != old);" line here made the function
    // malformed (it closed the body before the return statement).
    return __longlong_as_double(old);
}
#endif

namespace {

// InterZPConv Grouping
// np: n sample, nq: n point
// One thread per (batch, sample, anchor, kernel, neighbor) tuple; accumulates the
// weighted neighbor feature into anchor_feats across all c_in channels via atomicAdd.
template <typename scalar_t>
__global__ void spherical_conv_forward_cuda_kernel(
    const int* __restrict__ anchor_neighbors,       // [b, np, na, ks, ann]
    const scalar_t* __restrict__ anchor_weights,    // [b, np, na, ks, ann]
    const scalar_t* __restrict__ support_point_feats, // [b, c_in, nq, na]
    scalar_t* anchor_feats,                         // [b, c_in, ks, np, na]
    int np, int nq, int na, int ks, int ann, int c_in) {

    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // pn*na+an or ((pn*na+an)*ks+k)*ann + ni
    const int bn = blockIdx.y;

    // faster version
    const int ni = idx % ann;
    const int k = (idx/ann) % ks;
    const int an = (idx/(ann*ks)) % na;
    const int pn = idx/(ann*ks*na);
    if (pn >= np || an >= na || k >= ks || ni >= ann) return;

    const int q_idx = bn*np*na*ks*ann+idx;
    const int feat_dim = bn*c_in*na;
    const int qn = anchor_neighbors[q_idx];
    const scalar_t neighbor_weight = anchor_weights[q_idx];

    const scalar_t* neighbor_feat = &support_point_feats[feat_dim*nq + qn*na+an];
    scalar_t* anchor_feat = &anchor_feats[feat_dim*np*ks + (k*np+pn)*na+an];
    const int stride_1 = ks*np*na;
    const int stride_2 = nq*na;
    for(int ci = 0; ci < c_in; ci++) {
        // atomic: several neighbor threads can target the same anchor slot
        atomicAdd(anchor_feat, neighbor_feat[0] * neighbor_weight);
        anchor_feat += stride_1;
        neighbor_feat += stride_2;
    }
}

// Backward of spherical_conv_forward_cuda_kernel: scatters gradients from
// grad_anchor_feats back to the contributing support points with the same weights.
template <typename scalar_t>
__global__ void spherical_conv_backward_cuda_kernel(
    const int* __restrict__ anchor_neighbors,       // [b, np, na, ks, ann]
    const scalar_t* __restrict__ anchor_weights,    // [b, np, na, ks, ann]
    const scalar_t* __restrict__ grad_anchor_feats, // [b, c_in, ks, np, na]
    scalar_t* grad_support_point_feats,             // [b, c_in, nq, na]
    int np, int nq, int na, int ks, int ann, int c_in) {

    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // ((pn*na+an)*ks+k)*ann + ni
    const int bn = blockIdx.y;

    // faster version
    const int ni = idx % ann;
    const int k = (idx/ann)%ks;
    const int an = (idx/(ann*ks))%na;
    const int pn = idx/(ann*ks*na);
    if (pn >= np || an >= na || k>=ks || ni >=ann) return;

    const int q_idx = bn*np*na*ks*ann+idx;
    const int feat_dim = bn*c_in*na;
    const int qn = anchor_neighbors[q_idx];
    const scalar_t neighbor_weight = anchor_weights[q_idx];

    const scalar_t* grad_anchor_feat = &grad_anchor_feats[feat_dim*np*ks + (k*np+pn)*na+an];
    scalar_t* grad_support_feat = &grad_support_point_feats[feat_dim*nq + qn*na+an];
    const int stride_1 = ks*np*na;
    const int stride_2 = nq*na;
    for(int ci =0; ci < c_in; ci++) {
        atomicAdd(grad_support_feat, grad_anchor_feat[0] * neighbor_weight);
        grad_anchor_feat += stride_1;
        grad_support_feat += stride_2;
    }
}

// IntraZPConv Grouping
// Same accumulation pattern as above, but the neighborhood is over anchors
// (na_in -> na_out) and shared across the batch (no batch index in the tables).
template <typename scalar_t>
__global__ void intraspherical_conv_forward_cuda_kernel(
    const int* __restrict__ anchor_neighbors,       // [na_out, ann]
    const scalar_t* __restrict__ anchor_weights,    // [na_out, ks, ann]
    const scalar_t* __restrict__ support_point_feats, // [b, c_in, np, na_in]
    scalar_t* anchor_feats,                         // [b, c_in, ks, np, na_out]
    int np, int na_in, int na_out, int ks, int ann, int c_in) {

    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // ((pn*na_out+an)*ks+k)*ann + ni
    const int bn = blockIdx.y;

    // faster version
    const int ni = idx % ann;
    const int k = (idx/ann)%ks;
    const int an = (idx/(ann*ks))%na_out;
    const int pn = idx/(ann*ks*na_out);
    if (pn >= np || an >= na_out || k>=ks || ni >=ann) return;

    const int qan = anchor_neighbors[an*ann + ni];
    const scalar_t neighbor_weight = anchor_weights[(an*ks + k)*ann + ni];

    const scalar_t* neighbor_feat = &support_point_feats[bn*c_in*np*na_in + pn*na_in+qan];
    scalar_t* anchor_feat = &anchor_feats[bn*c_in*ks*np*na_out + (k*np+pn)*na_out+an];
    const int stride_1 = ks*np*na_out;
    const int stride_2 = np*na_in;
    for(int ci = 0; ci < c_in; ci++) {
        atomicAdd(anchor_feat, neighbor_feat[0] * neighbor_weight);
        anchor_feat += stride_1;
        neighbor_feat += stride_2;
    }
}

// Backward of intraspherical_conv_forward_cuda_kernel.
template <typename scalar_t>
__global__ void intraspherical_conv_backward_cuda_kernel(
    const int* __restrict__ anchor_neighbors,       // [na_out, ann]
    const scalar_t* __restrict__ anchor_weights,    // [na_out, ks, ann]
    const scalar_t* __restrict__ grad_anchor_feats, // [b, c_in, ks, np, na_out]
    scalar_t* grad_support_point_feats,             // [b, c_in, np, na_in]
    int np, int na_in, int na_out, int ks, int ann, int c_in) {

    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // ((pn*na_out+an)*ks+k)*ann + ni
    const int bn = blockIdx.y;

    // faster version
    const int ni = idx % ann;
    const int k = (idx/ann)%ks;
    const int an = (idx/(ann*ks))%na_out;
    const int pn = idx/(ann*ks*na_out);
    if (pn >= np || an >= na_out || k>=ks || ni >=ann) return;

    const int qan = anchor_neighbors[an*ann + ni];
    const scalar_t neighbor_weight = anchor_weights[(an*ks + k)*ann + ni];

    const scalar_t* grad_anchor_feat = &grad_anchor_feats[bn*c_in*ks*np*na_out + (k*np+pn)*na_out+an];
    scalar_t* grad_support_feat = &grad_support_point_feats[bn*c_in*np*na_in + pn*na_in+qan];
    const int stride_1 = ks*np*na_out;
    const int stride_2 = np*na_in;
    for(int ci = 0; ci < c_in; ci++) {
        atomicAdd(grad_support_feat, grad_anchor_feat[0] * neighbor_weight);
        grad_anchor_feat += stride_1;
        grad_support_feat += stride_2;
    }
}

} // anonymous namespace

// Host wrapper: launches the inter-spherical forward kernel; anchor_feats is
// accumulated in place and returned.
at::Tensor spherical_conv_forward_cuda(
    at::Tensor anchor_neighbors,     // [b, np, na, ks, ann]
    at::Tensor anchor_weights,       // [b, np, na, ks, ann]
    at::Tensor support_point_feats,  // [b, c_in, nq, na]
    at::Tensor anchor_feats          // [b, c_in, ks, np, na]
    ) {
    const auto batch_size = anchor_weights.size(0);
    const auto num_sample = anchor_weights.size(1);
    const auto num_anchor = anchor_weights.size(2);
    const auto kernel_size = anchor_weights.size(3);
    const auto num_anchornn = anchor_weights.size(4);
    const auto dim_in = support_point_feats.size(1);
    const auto num_support = support_point_feats.size(2);

    const int threads = 1024;
    const dim3 blocks((num_sample * num_anchor * kernel_size * num_anchornn + threads - 1) / threads, batch_size);
    //const dim3 blocks((num_sample * num_anchor + threads - 1) / threads, batch_size);

    AT_DISPATCH_FLOATING_TYPES(support_point_feats.type(), "spherical_conv_forward_cuda", ([&] {
        spherical_conv_forward_cuda_kernel<scalar_t><<<blocks, threads>>>(
            anchor_neighbors.data<int>(),
            anchor_weights.data<scalar_t>(),
            support_point_feats.data<scalar_t>(),
            anchor_feats.data<scalar_t>(),
            num_sample, num_support, num_anchor, kernel_size, num_anchornn, dim_in);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in forward_spherical_conv: %s\n", cudaGetErrorString(err));

    return anchor_feats;
}

// Host wrapper: launches the inter-spherical backward kernel; gradients are
// accumulated into grad_support_point_feats in place and returned.
at::Tensor spherical_conv_backward_cuda(
    at::Tensor anchor_neighbors,         // [b, np, na, ks, ann]
    at::Tensor anchor_weights,           // [b, np, na, ks, ann]
    at::Tensor grad_anchor_feats,        // [b, c_in, ks, np, na]
    at::Tensor grad_support_point_feats  // [b, c_in, nq, na]
    ) {
    const auto batch_size = anchor_weights.size(0);
    const auto num_sample = anchor_weights.size(1);
    const auto num_anchor = anchor_weights.size(2);
    const auto kernel_size = anchor_weights.size(3);
    const auto num_anchornn = anchor_weights.size(4);
    const auto dim_in = grad_anchor_feats.size(1);
    const auto num_support = grad_support_point_feats.size(2);

    const int threads = 1024;
    const dim3 blocks((num_sample * num_anchor * kernel_size * num_anchornn + threads - 1) / threads, batch_size);
    //const dim3 blocks((num_sample * num_anchor + threads - 1) / threads, batch_size);

    AT_DISPATCH_FLOATING_TYPES(grad_anchor_feats.type(), "spherical_conv_backward_cuda", ([&] {
        spherical_conv_backward_cuda_kernel<scalar_t><<<blocks, threads>>>(
            anchor_neighbors.data<int>(),
            anchor_weights.data<scalar_t>(),
            grad_anchor_feats.data<scalar_t>(),
            grad_support_point_feats.data<scalar_t>(),
            num_sample, num_support, num_anchor, kernel_size, num_anchornn, dim_in);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in backward_spherical_conv: %s\n", cudaGetErrorString(err));

    return grad_support_point_feats;
}

// Host wrapper: launches the intra-spherical forward kernel.
at::Tensor intraspherical_conv_forward_cuda(
    at::Tensor anchor_neighbors,     // [na_out, ann]
    at::Tensor anchor_weights,       // [na_out, ks, ann]
    at::Tensor support_point_feats,  // [b, c_in, np, na_in]
    at::Tensor anchor_feats          // [b, c_in, ks, np, na_out]
    ) {
    const auto batch_size = anchor_feats.size(0);
    const auto dim_in = anchor_feats.size(1);
    const auto kernel_size = anchor_feats.size(2);
    const auto num_support = anchor_feats.size(3);
    const auto num_anchor_out = anchor_feats.size(4);
    const auto num_anchor_in = support_point_feats.size(3);
    const auto num_anchornn = anchor_weights.size(2);

    const int threads = 1024;
    const dim3 blocks((num_support * num_anchor_out * kernel_size * num_anchornn + threads - 1) / threads, batch_size);

    AT_DISPATCH_FLOATING_TYPES(support_point_feats.type(), "intraspherical_conv_forward_cuda", ([&] {
        intraspherical_conv_forward_cuda_kernel<scalar_t><<<blocks, threads>>>(
            anchor_neighbors.data<int>(),
            anchor_weights.data<scalar_t>(),
            support_point_feats.data<scalar_t>(),
            anchor_feats.data<scalar_t>(),
            num_support, num_anchor_in, num_anchor_out, kernel_size, num_anchornn, dim_in);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in forward_intraspherical_conv: %s\n", cudaGetErrorString(err));

    return anchor_feats;
}

// Host wrapper: launches the intra-spherical backward kernel.
at::Tensor intraspherical_conv_backward_cuda(
    at::Tensor anchor_neighbors,         // [na_out, ann]
    at::Tensor anchor_weights,           // [na_out, ks, ann]
    at::Tensor grad_anchor_feats,        // [b, c_in, ks, np, na_out]
    at::Tensor grad_support_point_feats  // [b, c_in, np, na_in]
    ) {
    const auto batch_size = grad_anchor_feats.size(0);
    const auto num_support = grad_anchor_feats.size(3);
    const auto num_anchor_out = grad_anchor_feats.size(4);
    const auto kernel_size = grad_anchor_feats.size(2);
    const auto dim_in = grad_anchor_feats.size(1);
    const auto num_anchor_in = grad_support_point_feats.size(3);
    const auto num_anchornn = anchor_weights.size(2);

    const int threads = 1024;
    const dim3 blocks((num_support * num_anchor_out * kernel_size * num_anchornn + threads - 1) / threads, batch_size);

    AT_DISPATCH_FLOATING_TYPES(grad_anchor_feats.type(), "intraspherical_conv_backward_cuda", ([&] {
        intraspherical_conv_backward_cuda_kernel<scalar_t><<<blocks, threads>>>(
            anchor_neighbors.data<int>(),
            anchor_weights.data<scalar_t>(),
            grad_anchor_feats.data<scalar_t>(),
            grad_support_point_feats.data<scalar_t>(),
            num_support, num_anchor_in, num_anchor_out, kernel_size, num_anchornn, dim_in);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in backward_intraspherical_conv: %s\n", cudaGetErrorString(err));

    return grad_support_point_feats;
}
the_stack
#include "base/memory.h"
#include "core/optimizer.h"
#include "util/gpu.cuh"
#include "util/math.h"

namespace graphvite {
namespace gpu {
namespace knowledge_graph {

/**
 * @brief Train knowledge graph embedding with 0-moment optimizers
 * @tparam Vector vector type of embeddings
 * @tparam Index integral type of indexes
 * @tparam Model embedding model
 * @tparam optimizer_type type of optimizer
 *
 * One warp processes one positive sample together with its negatives
 * (sample_id advances by warps, and only lane 0 writes the loss).
 * NOTE(review): model.forward/backward appear to be warp-cooperative so that
 * `logit` is uniform across the warp — confirm against the Model definition.
 */
template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type>
__global__ void train(Memory<Vector, Index> head_embeddings, Memory<Vector, Index> tail_embeddings,
                      Memory<Vector, Index> relation_embeddings,
                      Memory<Index, int> batch, Memory<Index, int> negative_batch,
                      Memory<typename Vector::Float, int> loss,
                      Optimizer optimizer, float relation_lr_multiplier, float margin_or_l3,
                      float adversarial_temperature) {
    typedef typename Vector::Float Float;

    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int lane_id = thread_id % kWarpSize;
    const int num_thread = gridDim.x * blockDim.x;
    const int num_head = head_embeddings.count;
    const int batch_size = batch.count / 3;                       // 3 indices per sample
    const int num_negative = negative_batch.count / batch_size;   // negatives per sample
    Model<Vector> model;

    for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) {
        // elements in std::tuple are stored in reverse order
        // each positive sample is {relation, tail, head}
        Index relation_id = batch[sample_id * 3];
        Vector &relation = relation_embeddings[relation_id];

        // compute normalizer for self-adversarial negative sampling;
        // `bias` (logit of the first negative) keeps safe_exp numerically stable.
        // `bias` is only read inside this adversarial branch, so it is never
        // used uninitialized when adversarial_temperature <= kEpsilon.
        Float bias, normalizer = 0;
        if (adversarial_temperature > kEpsilon)
            for (int s = 0; s < num_negative; s++) {
                Index head_id = batch[sample_id * 3 + 2];
                Index tail_id = batch[sample_id * 3 + 1];
                // negative ids < num_head corrupt the head; the rest corrupt the tail
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                Vector &head = head_embeddings[head_id];
                Vector &tail = tail_embeddings[tail_id];
                // Forward
                Float logit;
                model.forward(head, tail, relation, logit, margin_or_l3);
                if (s == 0)
                    bias = logit;
                normalizer += safe_exp((logit - bias) / adversarial_temperature);
            }

        // s < num_negative: negative samples; s == num_negative: the positive sample
        Float sample_loss = 0;
        for (int s = 0; s <= num_negative; s++) {
            Index head_id = batch[sample_id * 3 + 2];
            Index tail_id = batch[sample_id * 3 + 1];
            int label = 1;
            if (s < num_negative) {
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                label = 0;
            }
            Vector &head = head_embeddings[head_id];
            Vector &tail = tail_embeddings[tail_id];
            // Forward
            Float logit;
            model.forward(head, tail, relation, logit, margin_or_l3);
            Float prob = sigmoid(logit);
            // Backward: binary cross-entropy; negatives weighted either uniformly
            // or by the self-adversarial softmax computed above
            Float gradient, weight;
            if (label) {
                gradient = prob - 1;
                weight = 1;
                sample_loss += weight * -log(prob + kEpsilon);
            } else {
                gradient = prob;
                if (adversarial_temperature > kEpsilon) {
                    weight = safe_exp((logit - bias) / adversarial_temperature) / normalizer;
                    // the normalizer may be out of date in ASGD
                    // so we need to clip the weight
                    weight = min(weight, Float(1));
                } else
                    weight = 1.0 / num_negative;
                sample_loss += weight * -log(1 - prob + kEpsilon);
            }
            model.backward<optimizer_type>(head, tail, relation, margin_or_l3, gradient, optimizer,
                                           relation_lr_multiplier, weight);
        }
        // average positive and (weighted) negative halves of the loss
        if (lane_id == 0)
            loss[sample_id] = sample_loss / 2;
    }
}

/**
 * @brief Train knowledge graph embedding with 1-moment optimizers
 * @tparam Vector vector type of embeddings
 * @tparam Index integral type of indexes
 * @tparam Model embedding model
 * @tparam optimizer_type type of optimizer
 *
 * Same algorithm as train() above, but threads per-parameter first-moment
 * buffers (e.g. momentum) through to model.backward().
 */
template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type>
__global__ void train_1_moment(Memory<Vector, Index> head_embeddings, Memory<Vector, Index> tail_embeddings,
                               Memory<Vector, Index> relation_embeddings,
                               Memory<Vector, Index> head_moment1s, Memory<Vector, Index> tail_moment1s,
                               Memory<Vector, Index> relation_moment1s,
                               Memory<Index, int> batch, Memory<Index, int> negative_batch,
                               Memory<typename Vector::Float, int> loss,
                               Optimizer optimizer, float relation_lr_multiplier, float margin_or_l3,
                               float adversarial_temperature) {
    typedef typename Vector::Float Float;

    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int lane_id = thread_id % kWarpSize;
    const int num_thread = gridDim.x * blockDim.x;
    const int num_head = head_embeddings.count;
    const int batch_size = batch.count / 3;
    const int num_negative = negative_batch.count / batch_size;
    Model<Vector> model;

    for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) {
        // elements in std::tuple are stored in reverse order
        // each positive sample is {relation, tail, head}
        Index relation_id = batch[sample_id * 3];
        Vector &relation = relation_embeddings[relation_id];
        Vector &relation_moment1 = relation_moment1s[relation_id];

        // compute normalizer (see train() for details)
        Float bias, normalizer = 0;
        if (adversarial_temperature > kEpsilon)
            for (int s = 0; s < num_negative; s++) {
                Index head_id = batch[sample_id * 3 + 2];
                Index tail_id = batch[sample_id * 3 + 1];
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                Vector &head = head_embeddings[head_id];
                Vector &tail = tail_embeddings[tail_id];
                // Forward
                Float logit;
                model.forward(head, tail, relation, logit, margin_or_l3);
                if (s == 0)
                    bias = logit;
                normalizer += safe_exp((logit - bias) / adversarial_temperature);
            }

        Float sample_loss = 0;
        for (int s = 0; s <= num_negative; s++) {
            Index head_id = batch[sample_id * 3 + 2];
            Index tail_id = batch[sample_id * 3 + 1];
            int label = 1;
            if (s < num_negative) {
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                label = 0;
            }
            Vector &head = head_embeddings[head_id];
            Vector &head_moment1 = head_moment1s[head_id];
            Vector &tail = tail_embeddings[tail_id];
            Vector &tail_moment1 = tail_moment1s[tail_id];
            // Forward
            Float logit;
            model.forward(head, tail, relation, logit, margin_or_l3);
            Float prob = sigmoid(logit);
            // Backward
            Float gradient, weight;
            if (label) {
                gradient = prob - 1;
                weight = 1;
                sample_loss += weight * -log(prob + kEpsilon);
            } else {
                gradient = prob;
                if (adversarial_temperature > kEpsilon) {
                    weight = safe_exp((logit - bias) / adversarial_temperature) / normalizer;
                    // the normalizer may be out of date in ASGD
                    // so we need to clip the weight
                    weight = min(weight, Float(1));
                } else
                    weight = 1.0 / num_negative;
                sample_loss += weight * -log(1 - prob + kEpsilon);
            }
            model.backward<optimizer_type>(head, tail, relation, head_moment1, tail_moment1, relation_moment1,
                                           margin_or_l3, gradient, optimizer, relation_lr_multiplier, weight);
        }
        if (lane_id == 0)
            loss[sample_id] = sample_loss / 2;
    }
}

/**
 * @brief Train knowledge graph embedding with 2-moment optimizers
 * @tparam Vector vector type of embeddings
 * @tparam Index integral type of indexes
 * @tparam Model embedding model
 * @tparam optimizer_type type of optimizer
 *
 * Same algorithm as train() above, but threads first- and second-moment
 * buffers (e.g. Adam state) through to model.backward().
 */
template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type>
__global__ void train_2_moment(Memory<Vector, Index> head_embeddings, Memory<Vector, Index> tail_embeddings,
                               Memory<Vector, Index> relation_embeddings,
                               Memory<Vector, Index> head_moment1s, Memory<Vector, Index> tail_moment1s,
                               Memory<Vector, Index> relation_moment1s,
                               Memory<Vector, Index> head_moment2s, Memory<Vector, Index> tail_moment2s,
                               Memory<Vector, Index> relation_moment2s,
                               Memory<Index, int> batch, Memory<Index, int> negative_batch,
                               Memory<typename Vector::Float, int> loss,
                               Optimizer optimizer, float relation_lr_multiplier, float margin_or_l3,
                               float adversarial_temperature) {
    typedef typename Vector::Float Float;

    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int lane_id = thread_id % kWarpSize;
    const int num_thread = gridDim.x * blockDim.x;
    const int num_head = head_embeddings.count;
    const int batch_size = batch.count / 3;
    const int num_negative = negative_batch.count / batch_size;
    Model<Vector> model;

    for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) {
        // elements in std::tuple are stored in reverse order
        // each positive sample is {relation, tail, head}
        Index relation_id = batch[sample_id * 3];
        Vector &relation = relation_embeddings[relation_id];
        Vector &relation_moment1 = relation_moment1s[relation_id];
        Vector &relation_moment2 = relation_moment2s[relation_id];

        // compute normalizer (see train() for details)
        Float bias, normalizer = 0;
        if (adversarial_temperature > kEpsilon)
            for (int s = 0; s < num_negative; s++) {
                Index head_id = batch[sample_id * 3 + 2];
                Index tail_id = batch[sample_id * 3 + 1];
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                Vector &head = head_embeddings[head_id];
                Vector &tail = tail_embeddings[tail_id];
                // Forward
                Float logit;
                model.forward(head, tail, relation, logit, margin_or_l3);
                if (s == 0)
                    bias = logit;
                normalizer += safe_exp((logit - bias) / adversarial_temperature);
            }

        Float sample_loss = 0;
        for (int s = 0; s <= num_negative; s++) {
            Index head_id = batch[sample_id * 3 + 2];
            Index tail_id = batch[sample_id * 3 + 1];
            int label = 1;
            if (s < num_negative) {
                Index negative_id = negative_batch[sample_id * num_negative + s];
                if (negative_id < num_head)
                    head_id = negative_id;
                else
                    tail_id = negative_id - num_head;
                label = 0;
            }
            Vector &head = head_embeddings[head_id];
            Vector &head_moment1 = head_moment1s[head_id];
            Vector &head_moment2 = head_moment2s[head_id];
            Vector &tail = tail_embeddings[tail_id];
            Vector &tail_moment1 = tail_moment1s[tail_id];
            Vector &tail_moment2 = tail_moment2s[tail_id];
            // Forward
            Float logit;
            model.forward(head, tail, relation, logit, margin_or_l3);
            Float prob = sigmoid(logit);
            // Backward
            Float gradient, weight;
            if (label) {
                gradient = prob - 1;
                weight = 1;
                sample_loss += weight * -log(prob + kEpsilon);
            } else {
                gradient = prob;
                if (adversarial_temperature > kEpsilon) {
                    weight = safe_exp((logit - bias) / adversarial_temperature) / normalizer;
                    // the normalizer may be out of date in ASGD
                    // so we need to clip the weight
                    weight = min(weight, Float(1));
                } else
                    weight = 1.0 / num_negative;
                sample_loss += weight * -log(1 - prob + kEpsilon);
            }
            model.backward<optimizer_type>(head, tail, relation, head_moment1, tail_moment1, relation_moment1,
                                           head_moment2, tail_moment2, relation_moment2, margin_or_l3, gradient,
                                           optimizer, relation_lr_multiplier, weight);
        }
        if (lane_id == 0)
            loss[sample_id] = sample_loss / 2;
    }
}

/**
 * @brief Predict logits for batch samples
 * @tparam Vector vector type of embeddings
 * @tparam Index integral type of indexes
 * @tparam Model embedding model
 *
 * One warp scores one sample; only lane 0 writes the resulting logit.
 */
template<class Vector, class Index, template<class> class Model>
__global__ void predict(Memory<Vector, Index> head_embeddings, Memory<Vector, Index> tail_embeddings,
                        Memory<Vector, Index> relation_embeddings,
                        Memory<Index, int> batch, Memory<typename Vector::Float, int> logits,
                        float margin_or_l3) {
    typedef typename Vector::Float Float;

    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int lane_id = thread_id % kWarpSize;
    const int num_thread = gridDim.x * blockDim.x;
    const int batch_size = batch.count / 3;
    Model<Vector> model;

    for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) {
        // elements in std::tuple are stored in reverse order
        // each positive sample is {relation, tail, head}
        Index head_id = batch[sample_id * 3 + 2];
        Index tail_id = batch[sample_id * 3 + 1];
        Index relation_id = batch[sample_id * 3];
        Vector &head = head_embeddings[head_id];
        Vector &tail = tail_embeddings[tail_id];
        Vector &relation = relation_embeddings[relation_id];

        Float logit;
        model.forward(head, tail, relation, logit, margin_or_l3);
        if (lane_id == 0)
            logits[sample_id] = logit;
    }
}

} // namespace knowledge_graph
} // namespace gpu
} // namespace graphvite
the_stack